diff --git a/src/runtime/go.mod b/src/runtime/go.mod index 90e8e4040..46eed044e 100644 --- a/src/runtime/go.mod +++ b/src/runtime/go.mod @@ -37,17 +37,22 @@ require ( github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 github.com/sirupsen/logrus v1.8.1 github.com/smartystreets/goconvey v1.6.4 // indirect - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 github.com/urfave/cli v1.22.2 github.com/vishvananda/netlink v1.1.1-0.20210924202909-187053b97868 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae - go.opentelemetry.io/otel v0.15.0 - go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0 - go.opentelemetry.io/otel/sdk v0.15.0 + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel v1.0.0 + go.opentelemetry.io/otel/exporters/jaeger v1.0.0 + go.opentelemetry.io/otel/sdk v1.0.0 + go.opentelemetry.io/otel/trace v1.0.0 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 - golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 + golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 - google.golang.org/grpc v1.33.2 + golang.org/x/text v0.3.5 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb // indirect + google.golang.org/grpc v1.36.0 k8s.io/apimachinery v0.20.6 k8s.io/cri-api v0.20.6 ) diff --git a/src/runtime/go.sum b/src/runtime/go.sum index 370694310..d3852708e 100644 --- a/src/runtime/go.sum +++ b/src/runtime/go.sum @@ -45,7 +45,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= @@ -67,13 +66,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= 
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -100,6 +96,7 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= @@ -179,6 +176,7 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -295,9 +293,11 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -517,8 +517,10 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= 
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -550,14 +552,17 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw= -go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= -go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0 h1:OZY+lMaUJiJ6ls1dDtqKhSPJWEVNytLuRUrzuR852jc= -go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0/go.mod h1:4DeFMzRzr2l5o/Lzh4GfOYEH+mnf7ZrEGDzzJEP6ta8= -go.opentelemetry.io/otel/sdk v0.15.0 h1:Hf2dl1Ad9Hn03qjcAuAq51GP5Pv1SV5puIkS2nRhdd8= -go.opentelemetry.io/otel/sdk v0.15.0/go.mod h1:Qudkwgq81OcA9GYVlbyZ62wkLieeS1eWxIL0ufxgwoc= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v1.0.0 h1:qTTn6x71GVBvoafHK/yaRUmFzI4LcONZD0/kXxl5PHI= +go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg= +go.opentelemetry.io/otel/exporters/jaeger v1.0.0 h1:cLhx8llHw02h5JTqGqaRbYn+QVKHmrzD9vEbKnSPk5U= +go.opentelemetry.io/otel/exporters/jaeger v1.0.0/go.mod h1:q10N1AolE1JjqKrFJK2tYw0iZpmX+HBaXBtuCzRnBGQ= +go.opentelemetry.io/otel/sdk v1.0.0 h1:BNPMYUONPNbLneMttKSjQhOTlFLOD9U22HNG1KrIN2Y= +go.opentelemetry.io/otel/sdk v1.0.0/go.mod h1:PCrDHlSy5x1kjezSdL37PhbFUMjrsLRshJ2zCzeXwbM= +go.opentelemetry.io/otel/trace v1.0.0 h1:TSBr8GTEtKevYMG/2d21M989r5WJYVimhTHBKVEZuh4= +go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -646,8 +651,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 h1:alLDrZkL34Y2bnGHfvC1CYBRBXCXgx8AC2vY4MRtYX4= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -657,7 +662,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -705,7 +709,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -716,6 +719,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -724,8 +728,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text 
v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -776,7 +781,6 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -799,15 +803,14 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -823,9 +826,9 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2 
h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/src/runtime/pkg/katautils/katatrace/tracing.go b/src/runtime/pkg/katautils/katatrace/tracing.go index 2c8b3945c..d976d8bda 100644 --- a/src/runtime/pkg/katautils/katatrace/tracing.go +++ b/src/runtime/pkg/katautils/katatrace/tracing.go @@ -11,11 +11,12 @@ import ( "github.com/sirupsen/logrus" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/trace/jaeger" - "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/jaeger" "go.opentelemetry.io/otel/propagation" - export "go.opentelemetry.io/otel/sdk/export/trace" + "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" "go.opentelemetry.io/otel/trace" otelTrace "go.opentelemetry.io/otel/trace" ) @@ -26,10 +27,10 @@ import ( // https: //github.com/kata-containers/tests/blob/master/tracing/tracing-test.sh type kataSpanExporter struct{} -var _ export.SpanExporter = (*kataSpanExporter)(nil) +var _ sdktrace.SpanExporter = (*kataSpanExporter)(nil) // ExportSpans exports SpanData to Jaeger. -func (e *kataSpanExporter) ExportSpans(ctx context.Context, spans []*export.SpanData) error { +func (e *kataSpanExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { for _, span := range spans { kataTraceLogger.Tracef("Reporting span %+v", span) } @@ -40,9 +41,9 @@ func (e *kataSpanExporter) Shutdown(ctx context.Context) error { return nil } -// tracerCloser contains a copy of the closer returned by createTracer() which -// is used by stopTracing(). -var tracerCloser func() +// tp is the trace provider created in CreateTracer() and used in StopTracing() +// to flush and shutdown all spans. 
+var tp *sdktrace.TracerProvider var kataTraceLogger = logrus.NewEntry(logrus.New()) @@ -62,10 +63,10 @@ type JaegerConfig struct { } // CreateTracer create a tracer -func CreateTracer(name string, config *JaegerConfig) (func(), error) { +func CreateTracer(name string, config *JaegerConfig) (*sdktrace.TracerProvider, error) { if !tracing { otel.SetTracerProvider(trace.NewNoopTracerProvider()) - return func() {}, nil + return nil, nil } // build kata exporter to log reporting span records @@ -77,37 +78,32 @@ func CreateTracer(name string, config *JaegerConfig) (func(), error) { collectorEndpoint = "http://localhost:14268/api/traces" } - jaegerExporter, err := jaeger.NewRawExporter( - jaeger.WithCollectorEndpoint(collectorEndpoint, + jaegerExporter, err := jaeger.New( + jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(collectorEndpoint), jaeger.WithUsername(config.JaegerUser), jaeger.WithPassword(config.JaegerPassword), - ), jaeger.WithProcess(jaeger.Process{ - ServiceName: name, - Tags: []label.KeyValue{ - label.String("exporter", "jaeger"), - label.String("lib", "opentelemetry"), - }, - })) + ), + ) + if err != nil { return nil, err } // build tracer provider, that combining both jaeger exporter and kata exporter. tp := sdktrace.NewTracerProvider( - sdktrace.WithConfig( - sdktrace.Config{ - DefaultSampler: sdktrace.AlwaysSample(), - }, - ), + sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithSyncer(kataExporter), sdktrace.WithSyncer(jaegerExporter), + sdktrace.WithResource(resource.NewSchemaless( + semconv.ServiceNameKey.String(name), + attribute.String("exporter", "jaeger"), + attribute.String("lib", "opentelemetry"), + )), ) - tracerCloser = jaegerExporter.Flush - otel.SetTracerProvider(tp) otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) - return tracerCloser, nil + return tp, nil } // StopTracing ends all tracing, reporting the spans to the collector. @@ -122,9 +118,8 @@ func StopTracing(ctx context.Context) { } // report all possible spans to the collector - if tracerCloser != nil { - tracerCloser() - } + tp.ForceFlush(ctx) + tp.Shutdown(ctx) } // Trace creates a new tracing span based on the specified name and parent context. 
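For context on the tracing.go changes above: the new CreateTracer/StopTracing follow the OpenTelemetry 1.0 provider lifecycle, where the Jaeger exporter is built with jaeger.New, the sampler and resource become separate TracerProvider options, span attributes use the attribute package instead of label, and the old Flush callback is replaced by ForceFlush/Shutdown on the provider. Below is a minimal, self-contained sketch of that pattern; the collector URL, service name, and tag values are illustrative placeholders, not values from this PR.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/jaeger"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

func main() {
	ctx := context.Background()

	// jaeger.New replaces the old jaeger.NewRawExporter; the endpoint is now
	// wrapped in jaeger.WithEndpoint inside WithCollectorEndpoint.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://localhost:14268/api/traces"),
	))
	if err != nil {
		log.Fatal(err)
	}

	// Sampler and resource are separate options in 1.0; the service name and
	// the old per-process tags move onto the resource.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sdktrace.AlwaysSample()),
		sdktrace.WithSyncer(exp),
		sdktrace.WithResource(resource.NewSchemaless(
			semconv.ServiceNameKey.String("example-service"),
			attribute.String("exporter", "jaeger"),
		)),
	)
	otel.SetTracerProvider(tp)
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, propagation.Baggage{},
	))

	// Span attributes use the attribute package where the 0.15 API used label.
	ctx, span := tp.Tracer("example").Start(ctx, "demo")
	span.SetAttributes(attribute.String("key", "value"))
	span.End()

	// The exporter Flush callback is gone: flush and shut down the provider.
	_ = tp.ForceFlush(ctx)
	_ = tp.Shutdown(ctx)
}
```

Note that in the sketch the shutdown calls are only reached when a provider was actually created; in the PR the nil case is avoided because StopTracing returns early when tracing is disabled.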
@@ -139,12 +134,12 @@ func Trace(parent context.Context, logger *logrus.Entry, name string, tags ...ma parent = context.Background() } - var otelTags []label.KeyValue + var otelTags []attribute.KeyValue // do not append tags if tracing is disabled if tracing { for _, tagSet := range tags { for k, v := range tagSet { - otelTags = append(otelTags, label.Key(k).String(v)) + otelTags = append(otelTags, attribute.Key(k).String(v)) } } } @@ -170,45 +165,31 @@ func addTag(span otelTrace.Span, key string, value interface{}) { return } if value == nil { - span.SetAttributes(label.String(key, "nil")) + span.SetAttributes(attribute.String(key, "nil")) return } switch value := value.(type) { case string: - span.SetAttributes(label.String(key, value)) + span.SetAttributes(attribute.String(key, value)) case bool: - span.SetAttributes(label.Bool(key, value)) + span.SetAttributes(attribute.Bool(key, value)) case int: - span.SetAttributes(label.Int(key, value)) + span.SetAttributes(attribute.Int(key, value)) case int8: - span.SetAttributes(label.Int(key, int(value))) + span.SetAttributes(attribute.Int(key, int(value))) case int16: - span.SetAttributes(label.Int(key, int(value))) - case int32: - span.SetAttributes(label.Int32(key, value)) + span.SetAttributes(attribute.Int(key, int(value))) case int64: - span.SetAttributes(label.Int64(key, value)) - case uint: - span.SetAttributes(label.Uint(key, value)) - case uint8: - span.SetAttributes(label.Uint(key, uint(value))) - case uint16: - span.SetAttributes(label.Uint(key, uint(value))) - case uint32: - span.SetAttributes(label.Uint32(key, value)) - case uint64: - span.SetAttributes(label.Uint64(key, value)) - case float32: - span.SetAttributes(label.Float32(key, value)) + span.SetAttributes(attribute.Int64(key, value)) case float64: - span.SetAttributes(label.Float64(key, value)) + span.SetAttributes(attribute.Float64(key, value)) default: content, err := json.Marshal(value) if content == nil && err == nil { - span.SetAttributes(label.String(key, "nil")) + span.SetAttributes(attribute.String(key, "nil")) } else if content != nil && err == nil { - span.SetAttributes(label.String(key, string(content))) + span.SetAttributes(attribute.String(key, string(content))) } else { kataTraceLogger.WithField("type", "bug").Error("span attribute value error") } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go deleted file mode 100644 index 93ae898cf..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - strictRead bool - strictWrite bool - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - strictRead bool - strictWrite bool -} - -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocol(t, false, true) -} - -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - return p -} - -func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactory(false, true) -} - -func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - if p.strictWrite { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(int32(version)) - if e != nil { - return e - } - e = p.WriteString(name) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } else { - e := p.WriteString(name) - if e != nil { - return e - } - e = p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - e := p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop() error { - e := p.WriteByte(STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - e := p.WriteByte(int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(value bool) error { - if value { - return p.WriteByte(1) - } - return p.WriteByte(0) -} - -func (p *TBinaryProtocol) WriteByte(value int8) error { - e := p.trans.WriteByte(byte(value)) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(value int16) error { - v := p.buffer[0:2] - binary.BigEndian.PutUint16(v, uint16(value)) - _, e := 
p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(value int32) error { - v := p.buffer[0:4] - binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.trans.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(value float64) error { - return p.WriteI64(int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(value string) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(value []byte) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32() - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.strictRead { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte() - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32() - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte() - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16() - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd() error { - return nil -} - -var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { - k, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) ReadMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = 
NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadBool() (bool, error) { - b, e := p.ReadByte() - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte() (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16() (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32() (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64() (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString() (value string, err error) { - size, e := p.ReadI32() - if e != nil { - return "", e - } - if size < 0 { - err = invalidDataLength - return - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { - size, e := p.ReadI32() - if e != nil { - return nil, e - } - if size < 0 { - return nil, invalidDataLength - } - - isize := int(size) - buf := make([]byte, isize) - _, err := io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(err) -} - -func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(buf []byte) error { - _, err := io.ReadFull(p.trans, buf) - return NewTProtocolException(err) -} - -const readLimit = 32768 - -func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - if size < 0 { - return "", nil - } - - var ( - buf bytes.Buffer - e error - b []byte - ) - - switch { - case int(size) <= len(p.buffer): - b = p.buffer[:size] // avoids allocation for small reads - case int(size) < readLimit: - b = make([]byte, size) - default: - b = make([]byte, readLimit) - } - - for size > 0 { - _, e = io.ReadFull(p.trans, b) - buf.Write(b) - if e != nil { - break - } - size -= readLimit - if size < readLimit && size > 0 { - b = b[:size] - } - } - return buf.String(), NewTProtocolException(e) -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go deleted file mode 
100644 index 57943e0f3..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "log" -) - -type TDebugProtocol struct { - Delegate TProtocol - LogPrefix string -} - -type TDebugProtocolFactory struct { - Underlying TProtocolFactory - LogPrefix string -} - -func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - } -} - -func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return &TDebugProtocol{ - Delegate: t.Underlying.GetProtocol(trans), - LogPrefix: t.LogPrefix, - } -} - -func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid) - log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) - return err -} -func (tdp *TDebugProtocol) WriteMessageEnd() error { - err := tdp.Delegate.WriteMessageEnd() - log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteStructBegin(name string) error { - err := tdp.Delegate.WriteStructBegin(name) - log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) - return err -} -func (tdp *TDebugProtocol) WriteStructEnd() error { - err := tdp.Delegate.WriteStructEnd() - log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - err := tdp.Delegate.WriteFieldBegin(name, typeId, id) - log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) - return err -} -func (tdp *TDebugProtocol) WriteFieldEnd() error { - err := tdp.Delegate.WriteFieldEnd() - log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteFieldStop() error { - err := tdp.Delegate.WriteFieldStop() - log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - err := tdp.Delegate.WriteMapBegin(keyType, valueType, size) - log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) - return err -} -func (tdp *TDebugProtocol) WriteMapEnd() error { - err := tdp.Delegate.WriteMapEnd() - log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error { - err := 
tdp.Delegate.WriteListBegin(elemType, size) - log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - return err -} -func (tdp *TDebugProtocol) WriteListEnd() error { - err := tdp.Delegate.WriteListEnd() - log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error { - err := tdp.Delegate.WriteSetBegin(elemType, size) - log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - return err -} -func (tdp *TDebugProtocol) WriteSetEnd() error { - err := tdp.Delegate.WriteSetEnd() - log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteBool(value bool) error { - err := tdp.Delegate.WriteBool(value) - log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteByte(value int8) error { - err := tdp.Delegate.WriteByte(value) - log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteI16(value int16) error { - err := tdp.Delegate.WriteI16(value) - log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteI32(value int32) error { - err := tdp.Delegate.WriteI32(value) - log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteI64(value int64) error { - err := tdp.Delegate.WriteI64(value) - log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteDouble(value float64) error { - err := tdp.Delegate.WriteDouble(value) - log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteString(value string) error { - err := tdp.Delegate.WriteString(value) - log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteBinary(value []byte) error { - err := tdp.Delegate.WriteBinary(value) - log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} - -func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { - name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin() - log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) - return -} -func (tdp *TDebugProtocol) ReadMessageEnd() (err error) { - err = tdp.Delegate.ReadMessageEnd() - log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) { - name, err = tdp.Delegate.ReadStructBegin() - log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) - return -} -func (tdp *TDebugProtocol) ReadStructEnd() (err error) { - err = tdp.Delegate.ReadStructEnd() - log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { - name, typeId, id, err = tdp.Delegate.ReadFieldBegin() - log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) - return -} -func (tdp *TDebugProtocol) ReadFieldEnd() (err error) { - err = tdp.Delegate.ReadFieldEnd() - log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp 
*TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { - keyType, valueType, size, err = tdp.Delegate.ReadMapBegin() - log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) - return -} -func (tdp *TDebugProtocol) ReadMapEnd() (err error) { - err = tdp.Delegate.ReadMapEnd() - log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadListBegin() - log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *TDebugProtocol) ReadListEnd() (err error) { - err = tdp.Delegate.ReadListEnd() - log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadSetBegin() - log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *TDebugProtocol) ReadSetEnd() (err error) { - err = tdp.Delegate.ReadSetEnd() - log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadBool() (value bool, err error) { - value, err = tdp.Delegate.ReadBool() - log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadByte() (value int8, err error) { - value, err = tdp.Delegate.ReadByte() - log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI16() (value int16, err error) { - value, err = tdp.Delegate.ReadI16() - log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI32() (value int32, err error) { - value, err = tdp.Delegate.ReadI32() - log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI64() (value int64, err error) { - value, err = tdp.Delegate.ReadI64() - log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) { - value, err = tdp.Delegate.ReadDouble() - log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadString() (value string, err error) { - value, err = tdp.Delegate.ReadString() - log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary() - log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { - err = tdp.Delegate.Skip(fieldType) - log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - return -} -func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { - err = tdp.Delegate.Flush(ctx) - log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - return -} - -func (tdp *TDebugProtocol) Transport() TTransport { - return tdp.Delegate.Transport() -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go deleted file mode 100644 index 91a0983a4..000000000 --- 
a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TDeserializer struct { - Transport TTransport - Protocol TProtocol -} - -func NewTDeserializer() *TDeserializer { - var transport TTransport - transport = NewTMemoryBufferLen(1024) - - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TDeserializer{ - transport, - protocol} -} - -func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) { - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} - -func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) { - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/field.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/field.go deleted file mode 100644 index 9d6652550..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/field.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Helper class that encapsulates field metadata. 
-type field struct { - name string - typeId TType - id int -} - -func newField(n string, t TType, i int) *field { - return &field{name: n, typeId: t, id: i} -} - -func (p *field) Name() string { - if p == nil { - return "" - } - return p.name -} - -func (p *field) TypeId() TType { - if p == nil { - return TType(VOID) - } - return p.typeId -} - -func (p *field) Id() int { - if p == nil { - return -1 - } - return p.id -} - -func (p *field) String() string { - if p == nil { - return "" - } - return "" -} - -var ANONYMOUS_FIELD *field - -type fieldSlice []field - -func (p fieldSlice) Len() int { - return len(p) -} - -func (p fieldSlice) Less(i, j int) bool { - return p[i].Id() < p[j].Id() -} - -func (p fieldSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func init() { - ANONYMOUS_FIELD = newField("", STOP, 0) -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go deleted file mode 100644 index 34275b5f4..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "io" -) - -const DEFAULT_MAX_LENGTH = 16384000 - -type TFramedTransport struct { - transport TTransport - buf bytes.Buffer - reader *bufio.Reader - frameSize uint32 //Current remaining size of the frame. 
if ==0 read next frame header - buffer [4]byte - maxLength uint32 -} - -type tFramedTransportFactory struct { - factory TTransportFactory - maxLength uint32 -} - -func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { - return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} -} - -func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { - return &tFramedTransportFactory{factory: factory, maxLength: maxLength} -} - -func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { - tt, err := p.factory.GetTransport(base) - if err != nil { - return nil, err - } - return NewTFramedTransportMaxLength(tt, p.maxLength), nil -} - -func NewTFramedTransport(transport TTransport) *TFramedTransport { - return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} -} - -func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { - return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength} -} - -func (p *TFramedTransport) Open() error { - return p.transport.Open() -} - -func (p *TFramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *TFramedTransport) Close() error { - return p.transport.Close() -} - -func (p *TFramedTransport) Read(buf []byte) (l int, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < uint32(len(buf)) { - frameSize := p.frameSize - tmp := make([]byte, p.frameSize) - l, err = p.Read(tmp) - copy(buf, tmp) - if err == nil { - // Note: It's important to only return an error when l - // is zero. - // In io.Reader.Read interface, it's perfectly fine to - // return partial data and nil error, which means - // "This is all the data we have right now without - // blocking. If you need the full data, call Read again - // or use io.ReadFull instead". - // Returning partial data with an error actually means - // there's no more data after the partial data just - // returned, which is not true in this case - // (it might be that the other end just haven't written - // them yet). 
- if l == 0 { - err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf))) - } - return - } - } - got, err := p.reader.Read(buf) - p.frameSize = p.frameSize - uint32(got) - //sanity check - if p.frameSize < 0 { - return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size") - } - return got, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) ReadByte() (c byte, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < 1 { - return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1)) - } - c, err = p.reader.ReadByte() - if err == nil { - p.frameSize-- - } - return -} - -func (p *TFramedTransport) Write(buf []byte) (int, error) { - n, err := p.buf.Write(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) WriteByte(c byte) error { - return p.buf.WriteByte(c) -} - -func (p *TFramedTransport) WriteString(s string) (n int, err error) { - return p.buf.WriteString(s) -} - -func (p *TFramedTransport) Flush(ctx context.Context) error { - size := p.buf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - p.buf.Truncate(0) - return NewTTransportExceptionFromError(err) - } - if size > 0 { - if n, err := p.buf.WriteTo(p.transport); err != nil { - print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n") - p.buf.Truncate(0) - return NewTTransportExceptionFromError(err) - } - } - err = p.transport.Flush(ctx) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) readFrameHeader() (uint32, error) { - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return 0, err - } - size := binary.BigEndian.Uint32(buf) - if size < 0 || size > p.maxLength { - return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - return size, nil -} - -func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { - return uint64(p.frameSize) -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go deleted file mode 100644 index 46205b28b..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" -) - -// THeaderProtocol is a thrift protocol that implements THeader: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -// -// It supports either binary or compact protocol as the wrapped protocol. -// -// Most of the THeader handlings are happening inside THeaderTransport. -type THeaderProtocol struct { - transport *THeaderTransport - - // Will be initialized on first read/write. - protocol TProtocol -} - -// NewTHeaderProtocol creates a new THeaderProtocol from the underlying -// transport. The passed in transport will be wrapped with THeaderTransport. -// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { - t := NewTHeaderTransport(trans) - p, _ := THeaderProtocolDefault.GetProtocol(t) - return &THeaderProtocol{ - transport: t, - protocol: p, - } -} - -type tHeaderProtocolFactory struct{} - -func (tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTHeaderProtocol(trans) -} - -// NewTHeaderProtocolFactory creates a factory for THeader. -// -// It's a wrapper for NewTHeaderProtocol -func NewTHeaderProtocolFactory() TProtocolFactory { - return tHeaderProtocolFactory{} -} - -// Transport returns the underlying transport. -// -// It's guaranteed to be of type *THeaderTransport. -func (p *THeaderProtocol) Transport() TTransport { - return p.transport -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (p *THeaderProtocol) GetReadHeaders() THeaderMap { - return p.transport.GetReadHeaders() -} - -// SetWriteHeader sets a header for write. -func (p *THeaderProtocol) SetWriteHeader(key, value string) { - p.transport.SetWriteHeader(key, value) -} - -// ClearWriteHeaders clears all write headers previously set. -func (p *THeaderProtocol) ClearWriteHeaders() { - p.transport.ClearWriteHeaders() -} - -// AddTransform add a transform for writing. 
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { - return p.transport.AddTransform(transform) -} - -func (p *THeaderProtocol) Flush(ctx context.Context) error { - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteMessageBegin(name string, typeID TMessageType, seqID int32) error { - newProto, err := p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - return err - } - p.protocol = newProto - p.transport.SequenceID = seqID - return p.protocol.WriteMessageBegin(name, typeID, seqID) -} - -func (p *THeaderProtocol) WriteMessageEnd() error { - if err := p.protocol.WriteMessageEnd(); err != nil { - return err - } - return p.transport.Flush(context.Background()) -} - -func (p *THeaderProtocol) WriteStructBegin(name string) error { - return p.protocol.WriteStructBegin(name) -} - -func (p *THeaderProtocol) WriteStructEnd() error { - return p.protocol.WriteStructEnd() -} - -func (p *THeaderProtocol) WriteFieldBegin(name string, typeID TType, id int16) error { - return p.protocol.WriteFieldBegin(name, typeID, id) -} - -func (p *THeaderProtocol) WriteFieldEnd() error { - return p.protocol.WriteFieldEnd() -} - -func (p *THeaderProtocol) WriteFieldStop() error { - return p.protocol.WriteFieldStop() -} - -func (p *THeaderProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - return p.protocol.WriteMapBegin(keyType, valueType, size) -} - -func (p *THeaderProtocol) WriteMapEnd() error { - return p.protocol.WriteMapEnd() -} - -func (p *THeaderProtocol) WriteListBegin(elemType TType, size int) error { - return p.protocol.WriteListBegin(elemType, size) -} - -func (p *THeaderProtocol) WriteListEnd() error { - return p.protocol.WriteListEnd() -} - -func (p *THeaderProtocol) WriteSetBegin(elemType TType, size int) error { - return p.protocol.WriteSetBegin(elemType, size) -} - -func (p *THeaderProtocol) WriteSetEnd() error { - return p.protocol.WriteSetEnd() -} - -func (p *THeaderProtocol) WriteBool(value bool) error { - return p.protocol.WriteBool(value) -} - -func (p *THeaderProtocol) WriteByte(value int8) error { - return p.protocol.WriteByte(value) -} - -func (p *THeaderProtocol) WriteI16(value int16) error { - return p.protocol.WriteI16(value) -} - -func (p *THeaderProtocol) WriteI32(value int32) error { - return p.protocol.WriteI32(value) -} - -func (p *THeaderProtocol) WriteI64(value int64) error { - return p.protocol.WriteI64(value) -} - -func (p *THeaderProtocol) WriteDouble(value float64) error { - return p.protocol.WriteDouble(value) -} - -func (p *THeaderProtocol) WriteString(value string) error { - return p.protocol.WriteString(value) -} - -func (p *THeaderProtocol) WriteBinary(value []byte) error { - return p.protocol.WriteBinary(value) -} - -// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
-func (p *THeaderProtocol) ReadFrame() error { - return p.transport.ReadFrame() -} - -func (p *THeaderProtocol) ReadMessageBegin() (name string, typeID TMessageType, seqID int32, err error) { - if err = p.transport.ReadFrame(); err != nil { - return - } - - var newProto TProtocol - newProto, err = p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - tAppExc, ok := err.(TApplicationException) - if !ok { - return - } - if e := p.protocol.WriteMessageBegin("", EXCEPTION, seqID); e != nil { - return - } - if e := tAppExc.Write(p.protocol); e != nil { - return - } - if e := p.protocol.WriteMessageEnd(); e != nil { - return - } - if e := p.transport.Flush(context.Background()); e != nil { - return - } - return - } - p.protocol = newProto - - return p.protocol.ReadMessageBegin() -} - -func (p *THeaderProtocol) ReadMessageEnd() error { - return p.protocol.ReadMessageEnd() -} - -func (p *THeaderProtocol) ReadStructBegin() (name string, err error) { - return p.protocol.ReadStructBegin() -} - -func (p *THeaderProtocol) ReadStructEnd() error { - return p.protocol.ReadStructEnd() -} - -func (p *THeaderProtocol) ReadFieldBegin() (name string, typeID TType, id int16, err error) { - return p.protocol.ReadFieldBegin() -} - -func (p *THeaderProtocol) ReadFieldEnd() error { - return p.protocol.ReadFieldEnd() -} - -func (p *THeaderProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { - return p.protocol.ReadMapBegin() -} - -func (p *THeaderProtocol) ReadMapEnd() error { - return p.protocol.ReadMapEnd() -} - -func (p *THeaderProtocol) ReadListBegin() (elemType TType, size int, err error) { - return p.protocol.ReadListBegin() -} - -func (p *THeaderProtocol) ReadListEnd() error { - return p.protocol.ReadListEnd() -} - -func (p *THeaderProtocol) ReadSetBegin() (elemType TType, size int, err error) { - return p.protocol.ReadSetBegin() -} - -func (p *THeaderProtocol) ReadSetEnd() error { - return p.protocol.ReadSetEnd() -} - -func (p *THeaderProtocol) ReadBool() (value bool, err error) { - return p.protocol.ReadBool() -} - -func (p *THeaderProtocol) ReadByte() (value int8, err error) { - return p.protocol.ReadByte() -} - -func (p *THeaderProtocol) ReadI16() (value int16, err error) { - return p.protocol.ReadI16() -} - -func (p *THeaderProtocol) ReadI32() (value int32, err error) { - return p.protocol.ReadI32() -} - -func (p *THeaderProtocol) ReadI64() (value int64, err error) { - return p.protocol.ReadI64() -} - -func (p *THeaderProtocol) ReadDouble() (value float64, err error) { - return p.protocol.ReadDouble() -} - -func (p *THeaderProtocol) ReadString() (value string, err error) { - return p.protocol.ReadString() -} - -func (p *THeaderProtocol) ReadBinary() (value []byte, err error) { - return p.protocol.ReadBinary() -} - -func (p *THeaderProtocol) Skip(fieldType TType) error { - return p.protocol.Skip(fieldType) -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go deleted file mode 100644 index 2e6bc4b16..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "errors" - "fmt" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(name string, typeId TMessageType, seqid int32) error - WriteMessageEnd() error - WriteStructBegin(name string) error - WriteStructEnd() error - WriteFieldBegin(name string, typeId TType, id int16) error - WriteFieldEnd() error - WriteFieldStop() error - WriteMapBegin(keyType TType, valueType TType, size int) error - WriteMapEnd() error - WriteListBegin(elemType TType, size int) error - WriteListEnd() error - WriteSetBegin(elemType TType, size int) error - WriteSetEnd() error - WriteBool(value bool) error - WriteByte(value int8) error - WriteI16(value int16) error - WriteI32(value int32) error - WriteI64(value int64) error - WriteDouble(value float64) error - WriteString(value string) error - WriteBinary(value []byte) error - - ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd() error - ReadStructBegin() (name string, err error) - ReadStructEnd() error - ReadFieldBegin() (name string, typeId TType, id int16, err error) - ReadFieldEnd() error - ReadMapBegin() (keyType TType, valueType TType, size int, err error) - ReadMapEnd() error - ReadListBegin() (elemType TType, size int, err error) - ReadListEnd() error - ReadSetBegin() (elemType TType, size int, err error) - ReadSetEnd() error - ReadBool() (value bool, err error) - ReadByte() (value int8, err error) - ReadI16() (value int16, err error) - ReadI32() (value int32, err error) - ReadI64() (value int64, err error) - ReadDouble() (value float64, err error) - ReadString() (value string, err error) - ReadBinary() (value []byte, err error) - - Skip(fieldType TType) (err error) - Flush(ctx context.Context) (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. -func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { - return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. 
-func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case BOOL: - _, err = self.ReadBool() - return - case BYTE: - _, err = self.ReadByte() - return - case I16: - _, err = self.ReadI16() - return - case I32: - _, err = self.ReadI32() - return - case I64: - _, err = self.ReadI64() - return - case DOUBLE: - _, err = self.ReadDouble() - return - case STRING: - _, err = self.ReadString() - return - case STRUCT: - if _, err = self.ReadStructBegin(); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin() - if typeId == STOP { - break - } - err := Skip(self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd() - } - return self.ReadStructEnd() - case MAP: - keyType, valueType, size, err := self.ReadMapBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(valueType) - } - return self.ReadMapEnd() - case SET: - elemType, size, err := self.ReadSetBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd() - case LIST: - elemType, size, err := self.ReadListBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd() - default: - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) - } - return nil -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go deleted file mode 100644 index 1ff4d3754..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" -) - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(p TProtocol) error - Read(p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TSerializer{ - transport, - protocol} -} - -func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - if err = t.Transport.Flush(ctx); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - - if err = t.Transport.Flush(ctx); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) - return -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/socket.go deleted file mode 100644 index 88b98f591..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/socket.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "net" - "time" -) - -type TSocket struct { - conn net.Conn - addr net.Addr - timeout time.Duration -} - -// NewTSocket creates a net.Conn-backed TTransport, given a host and port -// -// Example: -// trans, err := thrift.NewTSocket("localhost:9090") -func NewTSocket(hostPort string) (*TSocket, error) { - return NewTSocketTimeout(hostPort, 0) -} - -// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port -// it also accepts a timeout as a time.Duration -func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) { - //conn, err := net.DialTimeout(network, address, timeout) - addr, err := net.ResolveTCPAddr("tcp", hostPort) - if err != nil { - return nil, err - } - return NewTSocketFromAddrTimeout(addr, timeout), nil -} - -// Creates a TSocket from a net.Addr -func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket { - return &TSocket{addr: addr, timeout: timeout} -} - -// Creates a TSocket from an existing net.Conn -func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket { - return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout} -} - -// Sets the socket timeout -func (p *TSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *TSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSocket) Open() error { - if p.IsOpen() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. -func (p *TSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -//Returns the remote address of the socket. 
-func (p *TSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *TSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go deleted file mode 100644 index ba6337726..000000000 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "crypto/tls" - "net" - "time" -) - -type TSSLSocket struct { - conn net.Conn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. - hostPort string - // addr is nil when hostPort is not "", and is only used when the - // TSSLSocket is constructed from a net.Addr. 
- addr net.Addr - timeout time.Duration - cfg *tls.Config -} - -// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration -// -// Example: -// trans, err := thrift.NewTSSLSocket("localhost:9090", nil) -func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { - return NewTSSLSocketTimeout(hostPort, cfg, 0) -} - -// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port -// it also accepts a tls Configuration and a timeout as a time.Duration -func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) { - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil -} - -// Creates a TSSLSocket from a net.Addr -func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket { - return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg} -} - -// Creates a TSSLSocket from an existing net.Conn -func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket { - return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} -} - -// Sets the socket timeout -func (p *TSSLSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *TSSLSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = tls.DialWithDialer(&net.Dialer{ - Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } else { - if p.IsOpen() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = tls.DialWithDialer(&net.Dialer{ - Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSSLSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. 
-func (p *TSSLSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -func (p *TSSLSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSSLSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSSLSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSSLSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} diff --git a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_compare.go index dc200395c..41649d267 100644 --- a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -13,12 +13,42 @@ const ( compareGreater ) +var ( + intType = reflect.TypeOf(int(1)) + int8Type = reflect.TypeOf(int8(1)) + int16Type = reflect.TypeOf(int16(1)) + int32Type = reflect.TypeOf(int32(1)) + int64Type = reflect.TypeOf(int64(1)) + + uintType = reflect.TypeOf(uint(1)) + uint8Type = reflect.TypeOf(uint8(1)) + uint16Type = reflect.TypeOf(uint16(1)) + uint32Type = reflect.TypeOf(uint32(1)) + uint64Type = reflect.TypeOf(uint64(1)) + + float32Type = reflect.TypeOf(float32(1)) + float64Type = reflect.TypeOf(float64(1)) + + stringType = reflect.TypeOf("") +) + func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + obj1Value := reflect.ValueOf(obj1) + obj2Value := reflect.ValueOf(obj2) + + // throughout this switch we try and avoid calling .Convert() if possible, + // as this has a pretty big performance impact switch kind { case reflect.Int: { - intobj1 := obj1.(int) - intobj2 := obj2.(int) + intobj1, ok := obj1.(int) + if !ok { + intobj1 = obj1Value.Convert(intType).Interface().(int) + } + intobj2, ok := obj2.(int) + if !ok { + intobj2 = obj2Value.Convert(intType).Interface().(int) + } if intobj1 > intobj2 { return compareGreater, true } @@ -31,8 +61,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int8: { - int8obj1 := obj1.(int8) - int8obj2 := obj2.(int8) + int8obj1, ok := obj1.(int8) + if !ok { + int8obj1 = obj1Value.Convert(int8Type).Interface().(int8) + } + int8obj2, ok := obj2.(int8) + if !ok { + int8obj2 = obj2Value.Convert(int8Type).Interface().(int8) + } if int8obj1 > int8obj2 { return compareGreater, true } @@ -45,8 +81,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int16: { - int16obj1 := obj1.(int16) - int16obj2 := obj2.(int16) + int16obj1, ok := obj1.(int16) + if !ok { + int16obj1 = obj1Value.Convert(int16Type).Interface().(int16) + } + int16obj2, ok := obj2.(int16) + if !ok { + int16obj2 = obj2Value.Convert(int16Type).Interface().(int16) + } if int16obj1 > int16obj2 { return compareGreater, true } @@ -59,8 +101,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int32: { - 
int32obj1 := obj1.(int32) - int32obj2 := obj2.(int32) + int32obj1, ok := obj1.(int32) + if !ok { + int32obj1 = obj1Value.Convert(int32Type).Interface().(int32) + } + int32obj2, ok := obj2.(int32) + if !ok { + int32obj2 = obj2Value.Convert(int32Type).Interface().(int32) + } if int32obj1 > int32obj2 { return compareGreater, true } @@ -73,8 +121,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int64: { - int64obj1 := obj1.(int64) - int64obj2 := obj2.(int64) + int64obj1, ok := obj1.(int64) + if !ok { + int64obj1 = obj1Value.Convert(int64Type).Interface().(int64) + } + int64obj2, ok := obj2.(int64) + if !ok { + int64obj2 = obj2Value.Convert(int64Type).Interface().(int64) + } if int64obj1 > int64obj2 { return compareGreater, true } @@ -87,8 +141,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint: { - uintobj1 := obj1.(uint) - uintobj2 := obj2.(uint) + uintobj1, ok := obj1.(uint) + if !ok { + uintobj1 = obj1Value.Convert(uintType).Interface().(uint) + } + uintobj2, ok := obj2.(uint) + if !ok { + uintobj2 = obj2Value.Convert(uintType).Interface().(uint) + } if uintobj1 > uintobj2 { return compareGreater, true } @@ -101,8 +161,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint8: { - uint8obj1 := obj1.(uint8) - uint8obj2 := obj2.(uint8) + uint8obj1, ok := obj1.(uint8) + if !ok { + uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8) + } + uint8obj2, ok := obj2.(uint8) + if !ok { + uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8) + } if uint8obj1 > uint8obj2 { return compareGreater, true } @@ -115,8 +181,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint16: { - uint16obj1 := obj1.(uint16) - uint16obj2 := obj2.(uint16) + uint16obj1, ok := obj1.(uint16) + if !ok { + uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16) + } + uint16obj2, ok := obj2.(uint16) + if !ok { + uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16) + } if uint16obj1 > uint16obj2 { return compareGreater, true } @@ -129,8 +201,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint32: { - uint32obj1 := obj1.(uint32) - uint32obj2 := obj2.(uint32) + uint32obj1, ok := obj1.(uint32) + if !ok { + uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32) + } + uint32obj2, ok := obj2.(uint32) + if !ok { + uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32) + } if uint32obj1 > uint32obj2 { return compareGreater, true } @@ -143,8 +221,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint64: { - uint64obj1 := obj1.(uint64) - uint64obj2 := obj2.(uint64) + uint64obj1, ok := obj1.(uint64) + if !ok { + uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64) + } + uint64obj2, ok := obj2.(uint64) + if !ok { + uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64) + } if uint64obj1 > uint64obj2 { return compareGreater, true } @@ -157,8 +241,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Float32: { - float32obj1 := obj1.(float32) - float32obj2 := obj2.(float32) + float32obj1, ok := obj1.(float32) + if !ok { + float32obj1 = obj1Value.Convert(float32Type).Interface().(float32) + } + float32obj2, ok := obj2.(float32) + if !ok { + float32obj2 = obj2Value.Convert(float32Type).Interface().(float32) + } if float32obj1 > 
float32obj2 { return compareGreater, true } @@ -171,8 +261,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Float64: { - float64obj1 := obj1.(float64) - float64obj2 := obj2.(float64) + float64obj1, ok := obj1.(float64) + if !ok { + float64obj1 = obj1Value.Convert(float64Type).Interface().(float64) + } + float64obj2, ok := obj2.(float64) + if !ok { + float64obj2 = obj2Value.Convert(float64Type).Interface().(float64) + } if float64obj1 > float64obj2 { return compareGreater, true } @@ -185,8 +281,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.String: { - stringobj1 := obj1.(string) - stringobj2 := obj2.(string) + stringobj1, ok := obj1.(string) + if !ok { + stringobj1 = obj1Value.Convert(stringType).Interface().(string) + } + stringobj2, ok := obj2.(string) + if !ok { + stringobj2 = obj2Value.Convert(stringType).Interface().(string) + } if stringobj1 > stringobj2 { return compareGreater, true } @@ -240,6 +342,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) } +// Positive asserts that the specified element is positive +// +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) +func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) +} + +// Negative asserts that the specified element is negative +// +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) +func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) +} + func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_format.go b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_format.go index 49370eb16..4dfd1229a 100644 --- a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) } +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// @@ -321,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) } +// IsDecreasingf asserts that the collection is decreasing +// +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -375,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) } +// Negativef asserts that the specified element is negative +// +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") +func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Negative(t, e, append([]interface{}{msg}, args...)...) +} + // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -476,6 +553,15 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. 
+func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -572,6 +658,17 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) } +// Positivef asserts that the specified element is positive +// +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") +func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Positive(t, e, append([]interface{}{msg}, args...)...) +} + // Regexpf asserts that a specified regexp matches a string. // // assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") diff --git a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 9db889427..25337a6f0 100644 --- a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { return Error(a.t, err, msgAndArgs...) } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAs(a.t, err, target, msgAndArgs...) +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAsf(a.t, err, target, msg, args...) +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIs(a.t, err, target, msgAndArgs...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIsf(a.t, err, target, msg, args...) +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -631,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) 
} +// IsDecreasing asserts that the collection is decreasing +// +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) +func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(a.t, object, msgAndArgs...) +} + +// IsDecreasingf asserts that the collection is decreasing +// +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasingf(a.t, object, msg, args...) +} + +// IsIncreasing asserts that the collection is increasing +// +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) +func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(a.t, object, msgAndArgs...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasingf(a.t, object, msg, args...) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) +func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(a.t, object, msgAndArgs...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasingf(a.t, object, msg, args...) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) +func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(a.t, object, msgAndArgs...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasingf(a.t, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. 
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -739,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i return Lessf(a.t, e1, e2, msg, args...) } +// Negative asserts that the specified element is negative +// +// a.Negative(-1) +// a.Negative(-1.23) +func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negative(a.t, e, msgAndArgs...) +} + +// Negativef asserts that the specified element is negative +// +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") +func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negativef(a.t, e, msg, args...) +} + // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -941,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(a.t, err, target, msgAndArgs...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIsf(a.t, err, target, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1133,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b return Panicsf(a.t, f, msg, args...) } +// Positive asserts that the specified element is positive +// +// a.Positive(1) +// a.Positive(1.23) +func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positive(a.t, e, msgAndArgs...) +} + +// Positivef asserts that the specified element is positive +// +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") +func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positivef(a.t, e, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. // // a.Regexp(regexp.MustCompile("start"), "it's starting") diff --git a/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_order.go b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 000000000..1c3b47182 --- /dev/null +++ b/src/runtime/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,81 @@ +package assert + +import ( + "fmt" + "reflect" +) + +// isOrdered checks that collection contains orderable elements. 
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { + objKind := reflect.TypeOf(object).Kind() + if objKind != reflect.Slice && objKind != reflect.Array { + return false + } + + objValue := reflect.ValueOf(object) + objLen := objValue.Len() + + if objLen <= 1 { + return true + } + + value := objValue.Index(0) + valueInterface := value.Interface() + firstValueKind := value.Kind() + + for i := 1; i < objLen; i++ { + prevValue := value + prevValueInterface := valueInterface + + value = objValue.Index(i) + valueInterface = value.Interface() + + compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) + + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + } + + if !containsValue(allowedComparesResults, compareResult) { + return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...) + } + } + + return true +} + +// IsIncreasing asserts that the collection is increasing +// +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) +func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) +func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) +} + +// IsDecreasing asserts that the collection is decreasing +// +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) +func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) +func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) +} diff --git a/src/runtime/vendor/github.com/stretchr/testify/assert/assertions.go b/src/runtime/vendor/github.com/stretchr/testify/assert/assertions.go index 914a10d83..bcac4401f 100644 --- a/src/runtime/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/src/runtime/vendor/github.com/stretchr/testify/assert/assertions.go @@ -172,8 +172,8 @@ func isTest(name, prefix string) bool { if len(name) == len(prefix) { // "Test" is ok return true } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) + r, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(r) } func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { @@ -1622,6 +1622,7 @@ var spewConfig = spew.ConfigState{ DisableCapacities: true, SortKeys: true, DisableMethods: true, + MaxDepth: 10, } type tHelper 
interface { @@ -1693,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } } } + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ + "expected: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ + "expected: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) 
+} + +func buildErrorChainString(err error) string { + if err == nil { + return "" + } + + e := errors.Unwrap(err) + chain := fmt.Sprintf("%q", err.Error()) + for e != nil { + chain += fmt.Sprintf("\n\t%q", e.Error()) + e = errors.Unwrap(e) + } + return chain +} diff --git a/src/runtime/vendor/go.opencensus.io/.travis.yml b/src/runtime/vendor/go.opencensus.io/.travis.yml deleted file mode 100644 index bd6b66ee8..000000000 --- a/src/runtime/vendor/go.opencensus.io/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go_import_path: go.opencensus.io - -go: - - 1.11.x - -env: - global: - GO111MODULE=on - -before_script: - - make install-tools - -script: - - make travis-ci - - go run internal/check/version.go # TODO move this to makefile diff --git a/src/runtime/vendor/go.opencensus.io/go.mod b/src/runtime/vendor/go.opencensus.io/go.mod index c867df5f5..95b2522a7 100644 --- a/src/runtime/vendor/go.opencensus.io/go.mod +++ b/src/runtime/vendor/go.opencensus.io/go.mod @@ -1,15 +1,12 @@ module go.opencensus.io require ( - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 - github.com/golang/protobuf v1.3.1 - github.com/google/go-cmp v0.3.0 - github.com/stretchr/testify v1.4.0 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 - golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect - golang.org/x/text v0.3.2 // indirect - google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect - google.golang.org/grpc v1.20.1 + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e + github.com/golang/protobuf v1.4.3 + github.com/google/go-cmp v0.5.3 + github.com/stretchr/testify v1.6.1 + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + google.golang.org/grpc v1.33.2 ) go 1.13 diff --git a/src/runtime/vendor/go.opencensus.io/go.sum b/src/runtime/vendor/go.opencensus.io/go.sum index 01c02972c..c97cd1b55 100644 --- a/src/runtime/vendor/go.opencensus.io/go.sum +++ b/src/runtime/vendor/go.opencensus.io/go.sum @@ -1,25 +1,47 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -30,24 +52,25 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -55,20 +78,39 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/src/runtime/vendor/go.opencensus.io/trace/basetypes.go b/src/runtime/vendor/go.opencensus.io/trace/basetypes.go index 0c54492a2..c8e26ed63 100644 --- a/src/runtime/vendor/go.opencensus.io/trace/basetypes.go +++ b/src/runtime/vendor/go.opencensus.io/trace/basetypes.go @@ -49,6 +49,16 @@ type Attribute struct { value interface{} } +// Key returns the attribute's key +func (a *Attribute) Key() string { + return a.key +} + +// Value returns the attribute's value +func (a *Attribute) Value() interface{} { + return a.value +} + // BoolAttribute returns a bool-valued attribute. func BoolAttribute(key string, value bool) Attribute { return Attribute{key: key, value: value} diff --git a/src/runtime/vendor/go.opencensus.io/trace/spanstore.go b/src/runtime/vendor/go.opencensus.io/trace/spanstore.go index c442d9902..e601f76f2 100644 --- a/src/runtime/vendor/go.opencensus.io/trace/spanstore.go +++ b/src/runtime/vendor/go.opencensus.io/trace/spanstore.go @@ -48,8 +48,10 @@ func (i internalOnly) ReportActiveSpans(name string) []*SpanData { var out []*SpanData s.mu.Lock() defer s.mu.Unlock() - for span := range s.active { - out = append(out, span.makeSpanData()) + for activeSpan := range s.active { + if s, ok := activeSpan.(*span); ok { + out = append(out, s.makeSpanData()) + } } return out } @@ -185,7 +187,7 @@ func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency t // bucketed by latency. type spanStore struct { mu sync.Mutex // protects everything below. - active map[*Span]struct{} + active map[SpanInterface]struct{} errors map[int32]*bucket latency []bucket maxSpansPerErrorBucket int @@ -194,7 +196,7 @@ type spanStore struct { // newSpanStore creates a span store. func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { s := &spanStore{ - active: make(map[*Span]struct{}), + active: make(map[SpanInterface]struct{}), latency: make([]bucket, len(defaultLatencies)+1), maxSpansPerErrorBucket: errorBucketSize, } @@ -271,7 +273,7 @@ func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { } // add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span *Span) { +func (s *spanStore) add(span SpanInterface) { s.mu.Lock() s.active[span] = struct{}{} s.mu.Unlock() @@ -279,7 +281,7 @@ func (s *spanStore) add(span *Span) { // finished removes a span from the active set, and adds a corresponding // SpanData to a latency or error bucket. 
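The Key and Value accessors added to go.opencensus.io/trace.Attribute above make attribute contents readable outside the trace package, which is what a bridge or exporter needs when translating spans. A small sketch under that assumption; the attribute names are made up for illustration.

```go
package main

import (
	"fmt"

	octrace "go.opencensus.io/trace"
)

// printAttributes uses the accessors introduced by this patch; previously only
// the trace package itself could read an Attribute's key and value.
func printAttributes(attrs []octrace.Attribute) {
	for i := range attrs {
		a := &attrs[i] // Key() and Value() are defined on *Attribute
		fmt.Printf("%s=%v\n", a.Key(), a.Value())
	}
}

func main() {
	attrs := []octrace.Attribute{
		octrace.BoolAttribute("sandbox.running", true),
		octrace.StringAttribute("sandbox.id", "demo"),
		octrace.Int64Attribute("containers", 2),
	}
	printAttributes(attrs)
}
```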
-func (s *spanStore) finished(span *Span, sd *SpanData) { +func (s *spanStore) finished(span SpanInterface, sd *SpanData) { latency := sd.EndTime.Sub(sd.StartTime) if latency < 0 { latency = 0 diff --git a/src/runtime/vendor/go.opencensus.io/trace/trace.go b/src/runtime/vendor/go.opencensus.io/trace/trace.go index 125e2cd90..861df9d39 100644 --- a/src/runtime/vendor/go.opencensus.io/trace/trace.go +++ b/src/runtime/vendor/go.opencensus.io/trace/trace.go @@ -28,12 +28,16 @@ import ( "go.opencensus.io/trace/tracestate" ) +type tracer struct{} + +var _ Tracer = &tracer{} + // Span represents a span of a trace. It has an associated SpanContext, and // stores data accumulated while the span is active. // // Ideally users should interact with Spans by calling the functions in this // package that take a Context parameter. -type Span struct { +type span struct { // data contains information recorded about the span. // // It will be non-nil if we are exporting the span or recording events for it. @@ -66,7 +70,7 @@ type Span struct { // IsRecordingEvents returns true if events are being recorded for this span. // Use this check to avoid computing expensive annotations when they will never // be used. -func (s *Span) IsRecordingEvents() bool { +func (s *span) IsRecordingEvents() bool { if s == nil { return false } @@ -109,13 +113,13 @@ type SpanContext struct { type contextKey struct{} // FromContext returns the Span stored in a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Span { +func (t *tracer) FromContext(ctx context.Context) *Span { s, _ := ctx.Value(contextKey{}).(*Span) return s } // NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { +func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } @@ -166,12 +170,14 @@ func WithSampler(sampler Sampler) StartOption { // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext - if p := FromContext(ctx); p != nil { - p.addChild() - parent = p.spanContext + if p := t.FromContext(ctx); p != nil { + if ps, ok := p.internal.(*span); ok { + ps.addChild() + } + parent = p.SpanContext() } for _, op := range o { op(&opts) @@ -180,7 +186,8 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } // StartSpanWithRemoteParent starts a new child span of the span from the given parent. @@ -190,7 +197,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. 
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { op(&opts) @@ -198,19 +205,24 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { - span := &Span{} - span.spanContext = parent +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { + s := &span{} + s.spanContext = parent cfg := config.Load().(*Config) + if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { + // lazy initialization + gen.init() + } if !hasParent { - span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() } - span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler if !hasParent || remoteParent || o.Sampler != nil { @@ -222,47 +234,47 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa if o.Sampler != nil { sampler = o.Sampler } - span.spanContext.setIsSampled(sampler(SamplingParameters{ + s.spanContext.setIsSampled(sampler(SamplingParameters{ ParentContext: parent, - TraceID: span.spanContext.TraceID, - SpanID: span.spanContext.SpanID, + TraceID: s.spanContext.TraceID, + SpanID: s.spanContext.SpanID, Name: name, HasRemoteParent: remoteParent}).Sample) } - if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { - return span + if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { + return s } - span.data = &SpanData{ - SpanContext: span.spanContext, + s.data = &SpanData{ + SpanContext: s.spanContext, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, HasRemoteParent: remoteParent, } - span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + s.links = newEvictedQueue(cfg.MaxLinksPerSpan) if hasParent { - span.data.ParentSpanID = parent.SpanID + s.data.ParentSpanID = parent.SpanID } if internal.LocalSpanStoreEnabled { var ss *spanStore ss = spanStoreForNameCreateIfNew(name) if ss != nil { - span.spanStore = ss - ss.add(span) + s.spanStore = ss + ss.add(s) } } - return span + return s } // End ends the span. -func (s *Span) End() { +func (s *span) End() { if s == nil { return } @@ -292,7 +304,7 @@ func (s *Span) End() { // makeSpanData produces a SpanData representing the current state of the Span. // It requires that s.data is non-nil. 
-func (s *Span) makeSpanData() *SpanData { +func (s *span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data @@ -317,7 +329,7 @@ func (s *Span) makeSpanData() *SpanData { } // SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { +func (s *span) SpanContext() SpanContext { if s == nil { return SpanContext{} } @@ -325,7 +337,7 @@ func (s *Span) SpanContext() SpanContext { } // SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { +func (s *span) SetName(name string) { if !s.IsRecordingEvents() { return } @@ -335,7 +347,7 @@ func (s *Span) SetName(name string) { } // SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { +func (s *span) SetStatus(status Status) { if !s.IsRecordingEvents() { return } @@ -344,7 +356,7 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } -func (s *Span) interfaceArrayToLinksArray() []Link { +func (s *span) interfaceArrayToLinksArray() []Link { linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) @@ -352,7 +364,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link { return linksArr } -func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { +func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) @@ -360,7 +372,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { return messageEventArr } -func (s *Span) interfaceArrayToAnnotationArray() []Annotation { +func (s *span) interfaceArrayToAnnotationArray() []Annotation { annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) @@ -368,7 +380,7 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { return annotationArr } -func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { +func (s *span) lruAttributesToAttributeMap() map[string]interface{} { attributes := make(map[string]interface{}, s.lruAttributes.len()) for _, key := range s.lruAttributes.keys() { value, ok := s.lruAttributes.get(key) @@ -380,13 +392,13 @@ func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { return attributes } -func (s *Span) copyToCappedAttributes(attributes []Attribute) { +func (s *span) copyToCappedAttributes(attributes []Attribute) { for _, a := range attributes { s.lruAttributes.add(a.key, a.value) } } -func (s *Span) addChild() { +func (s *span) addChild() { if !s.IsRecordingEvents() { return } @@ -398,7 +410,7 @@ func (s *Span) addChild() { // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { +func (s *span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } @@ -407,49 +419,27 @@ func (s *Span) AddAttributes(attributes ...Attribute) { s.mu.Unlock() } -// copyAttributes copies a slice of Attributes into a map. 
-func copyAttributes(m map[string]interface{}, attributes []Attribute) { - for _, a := range attributes { - m[a.key] = a.value - } -} - -func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { +func (s *span) printStringInternal(attributes []Attribute, str string) { now := time.Now() - msg := fmt.Sprintf(format, a...) - var m map[string]interface{} - s.mu.Lock() + var am map[string]interface{} if len(attributes) != 0 { - m = make(map[string]interface{}, len(attributes)) - copyAttributes(m, attributes) + am = make(map[string]interface{}, len(attributes)) + for _, attr := range attributes { + am[attr.key] = attr.value + } } - s.annotations.add(Annotation{ - Time: now, - Message: msg, - Attributes: m, - }) - s.mu.Unlock() -} - -func (s *Span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var a map[string]interface{} s.mu.Lock() - if len(attributes) != 0 { - a = make(map[string]interface{}, len(attributes)) - copyAttributes(a, attributes) - } s.annotations.add(Annotation{ Time: now, Message: str, - Attributes: a, + Attributes: am, }) s.mu.Unlock() } // Annotate adds an annotation with attributes. // Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { +func (s *span) Annotate(attributes []Attribute, str string) { if !s.IsRecordingEvents() { return } @@ -457,11 +447,11 @@ func (s *Span) Annotate(attributes []Attribute, str string) { } // Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { +func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { if !s.IsRecordingEvents() { return } - s.lazyPrintfInternal(attributes, format, a...) + s.printStringInternal(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to the span. @@ -470,7 +460,7 @@ func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{} // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -492,7 +482,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -509,7 +499,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } // AddLink adds a link to the span. -func (s *Span) AddLink(l Link) { +func (s *span) AddLink(l Link) { if !s.IsRecordingEvents() { return } @@ -518,7 +508,7 @@ func (s *Span) AddLink(l Link) { s.mu.Unlock() } -func (s *Span) String() string { +func (s *span) String() string { if s == nil { return "" } @@ -534,20 +524,9 @@ func (s *Span) String() string { var config atomic.Value // access atomically func init() { - gen := &defaultIDGenerator{} - // initialize traceID and spanID generators. 
- var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - config.Store(&Config{ DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + IDGenerator: &defaultIDGenerator{}, MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, @@ -571,6 +550,24 @@ type defaultIDGenerator struct { traceIDAdd [2]uint64 traceIDRand *rand.Rand + + initOnce sync.Once +} + +// init initializes the generator on the first call to avoid consuming entropy +// unnecessarily. +func (gen *defaultIDGenerator) init() { + gen.initOnce.Do(func() { + // initialize traceID and spanID generators. + var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + }) } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. diff --git a/src/runtime/vendor/go.opencensus.io/trace/trace_api.go b/src/runtime/vendor/go.opencensus.io/trace/trace_api.go new file mode 100644 index 000000000..9e2c3a999 --- /dev/null +++ b/src/runtime/vendor/go.opencensus.io/trace/trace_api.go @@ -0,0 +1,265 @@ +// Copyright 2020, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" +) + +// DefaultTracer is the tracer used when package-level exported functions are invoked. +var DefaultTracer Tracer = &tracer{} + +// Tracer can start spans and access context functions. +type Tracer interface { + + // StartSpan starts a new child span of the current span in the context. If + // there is no span in the context, creates a new trace and span. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) + + // StartSpanWithRemoteParent starts a new child span of the span from the given parent. + // + // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is + // preferred for cases where the parent is propagated via an incoming request. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) + + // FromContext returns the Span stored in a context, or nil if there isn't one. + FromContext(ctx context.Context) *Span + + // NewContext returns a new context with the given Span attached. 
+ NewContext(parent context.Context, s *Span) context.Context +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpan(ctx, name, o...) +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) +} + +// FromContext returns the Span stored in a context, or a Span that is not +// recording events if there isn't one. +func FromContext(ctx context.Context) *Span { + return DefaultTracer.FromContext(ctx) +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return DefaultTracer.NewContext(parent, s) +} + +// SpanInterface represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type SpanInterface interface { + + // IsRecordingEvents returns true if events are being recorded for this span. + // Use this check to avoid computing expensive annotations when they will never + // be used. + IsRecordingEvents() bool + + // End ends the span. + End() + + // SpanContext returns the SpanContext of the span. + SpanContext() SpanContext + + // SetName sets the name of the span, if it is recording events. + SetName(name string) + + // SetStatus sets the status of the span, if it is recording events. + SetStatus(status Status) + + // AddAttributes sets attributes in the span. + // + // Existing attributes whose keys appear in the attributes parameter are overwritten. + AddAttributes(attributes ...Attribute) + + // Annotate adds an annotation with attributes. + // Attributes can be nil. + Annotate(attributes []Attribute, str string) + + // Annotatef adds an annotation with attributes. + Annotatef(attributes []Attribute, format string, a ...interface{}) + + // AddMessageSendEvent adds a message send event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. + AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddMessageReceiveEvent adds a message receive event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. 
+ AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddLink adds a link to the span. + AddLink(l Link) + + // String prints a string representation of a span. + String() string +} + +// NewSpan is a convenience function for creating a *Span out of a *span +func NewSpan(s SpanInterface) *Span { + return &Span{internal: s} +} + +// Span is a struct wrapper around the SpanInt interface, which allows correctly handling +// nil spans, while also allowing the SpanInterface implementation to be swapped out. +type Span struct { + internal SpanInterface +} + +// Internal returns the underlying implementation of the Span +func (s *Span) Internal() SpanInterface { + return s.internal +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.internal.IsRecordingEvents() +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + s.internal.End() +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.internal.SpanContext() +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetName(name) +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetStatus(status) +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddAttributes(attributes...) +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotate(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotatef(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. 
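The new trace_api.go splits the old concrete OpenCensus Span into a SpanInterface plus a nil-safe *Span wrapper, and routes the package-level StartSpan/FromContext/NewContext helpers through the swappable DefaultTracer. A hedged sketch of what that buys callers follows; the span name and attribute are placeholders, not taken from this patch.

```go
package main

import (
	"context"
	"fmt"

	octrace "go.opencensus.io/trace"
)

func main() {
	// Package-level helpers now delegate to octrace.DefaultTracer, so
	// existing OpenCensus call sites keep compiling unchanged.
	ctx, span := octrace.StartSpan(context.Background(), "demo")
	span.AddAttributes(octrace.StringAttribute("key", "value"))
	span.End()

	// With the default tracer, a context without a span yields a nil *Span,
	// and calls on it are no-ops rather than panics (End checks for nil).
	missing := octrace.FromContext(context.Background())
	missing.End()

	// DefaultTracer is a package variable of interface type, so another
	// Tracer implementation could be installed without touching callers:
	//   octrace.DefaultTracer = someOtherTracer // hypothetical consumer
	fmt.Println(octrace.FromContext(ctx).SpanContext().TraceID)
}
```

Because DefaultTracer is just a variable of the new Tracer interface type, an alternative implementation (a bridge into another tracing SDK, for instance) can be swapped in at startup; this patch only introduces the seam and does not show such a consumer.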
+func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddLink(l) +} + +// String prints a string representation of a span. +func (s *Span) String() string { + if s == nil { + return "" + } + return s.internal.String() +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/.gitattributes b/src/runtime/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 000000000..314766e91 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/.gitignore b/src/runtime/vendor/go.opentelemetry.io/otel/.gitignore index 90bd682fd..759cf53e0 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/.gitignore +++ b/src/runtime/vendor/go.opentelemetry.io/otel/.gitignore @@ -10,14 +10,12 @@ coverage.* gen/ -/example/basic/basic -/example/grpc/client/client -/example/grpc/server/server -/example/http/client/client -/example/http/server/server +/example/fib/fib /example/jaeger/jaeger /example/namedtracer/namedtracer /example/opencensus/opencensus +/example/passthrough/passthrough /example/prometheus/prometheus +/example/prom-collector/prom-collector /example/zipkin/zipkin /example/otel-collector/otel-collector diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/.golangci.yml b/src/runtime/vendor/go.opentelemetry.io/otel/.golangci.yml index 2ef168198..d40bdedc3 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/src/runtime/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -7,7 +7,7 @@ linters: enable: - misspell - goimports - - golint + - revive - gofmt issues: @@ -16,12 +16,12 @@ issues: - path: _test\.go text: "context.Context should be the first parameter of a function" linters: - - golint + - revive # Yes, they are, but it's okay in a test - path: _test\.go text: "exported func.*returns unexported type.*which can be annoying to use" linters: - - golint + - revive linters-settings: misspell: diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/.markdown-link.json b/src/runtime/vendor/go.opentelemetry.io/otel/.markdown-link.json new file mode 100644 index 000000000..f222ad89c --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/.markdown-link.json @@ -0,0 +1,16 @@ +{ + "ignorePatterns": [ + { + "pattern": "^http(s)?://localhost" + } + ], + "replacementPatterns": [ + { + "pattern": "^/registry", + "replacement": "https://opentelemetry.io/registry" + } + ], + "retryOn429": true, + "retryCount": 5, + "fallbackRetryDelay": "30s" +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/src/runtime/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git 
a/src/runtime/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/src/runtime/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 9092b5d61..2cb46459c 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/src/runtime/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,569 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +### Changed + +- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) + +## [1.0.0] - 2021-09-20 + +This is the first stable release for the project. +This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). + +### Added + +- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) + +### Fixed + +- Slice-valued attributes can correctly be used as map keys. (#2223) + +### Removed + +- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) +- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) +- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) +- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. + Use the typed functions and methods added to the package instead. (#2235) + - The `Key.Array` method is removed. + - The `Array` function is removed. + - The `Any` function is removed. + - The `ArrayValue` function is removed. + - The `AsArray` function is removed. + +## [1.0.0-RC3] - 2021-09-02 + +### Added + +- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) +- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) +- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) + - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. +- Added the `go.opentelemetry.io/otel/example/fib` example package. + Included is an example application that computes Fibonacci numbers. (#2203) + +### Changed + +- Metric instruments have been renamed to match the (feature-frozen) metric API specification: + - ValueRecorder becomes Histogram + - ValueObserver becomes Gauge + - SumObserver becomes CounterObserver + - UpDownSumObserver becomes UpDownCounterObserver + The API exported from this project is still considered experimental. (#2202) +- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) +- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) +- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. 
Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) +- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) + +### Deprecated + +- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. + All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. + The functions from that package should be used instead. (#2166) +- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. + Use the typed `*Slice` functions and types added to the package instead. (#2162) +- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. + Use the typed functions instead. (#2181) +- The `go.opentelemetry.io/otel/oteltest` package is deprecated. + The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) + +### Removed + +- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) + +### Fixed + +- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) +- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) +- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) +- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) +- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195) +- Fixed typos in resources.go. (#2201) + +## [1.0.0-RC2] - 2021-07-26 + +### Added + +- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) +- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) +- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) +- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115) +- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. + This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. + For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) +- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/skd/trace/tracetest` package. + This type can be used with the default SDK as a `SpanProcessor` during testing. 
(#2132) + +### Changed + +- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) +- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) + +### Deprecated + +- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) +- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123) +- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. + Use the `trace.ParseTraceState` function instead. (#2122) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) +- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. + The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097) +- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095) +- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) + +### Fixed + +- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) +- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) +- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092) +- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. + This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) +- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) +- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) + +## [Experimental Metrics v0.22.0] - 2021-07-19 + +### Added + +- Adds HTTP support for OTLP metrics exporter. (#2022) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) + +## [1.0.0-RC1] / 0.21.0 - 2021-06-18 + +With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` +while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules +with major version 0. + +### Added + +- Adds `otlpgrpc.WithRetry`option for configuring the retry policy for transient errors on the otlp/gRPC exporter. 
(#1832) + - The following status codes are defined as transient errors: + | gRPC Status Code | Description | + | ---------------- | ----------- | + | 1 | Cancelled | + | 4 | Deadline Exceeded | + | 8 | Resource Exhausted | + | 10 | Aborted | + | 10 | Out of Range | + | 14 | Unavailable | + | 15 | Data Loss | +- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) +- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. + This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) +- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) +- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) +- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) +- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. + It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) +- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. + This method returns the number of list-members the `TraceState` holds. (#1937) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. + Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing .(#1922) +- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967) +- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. + These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967) +- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) +- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) +- Several builtin resource detectors now correctly populate the schema URL. (#1938) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`.(#1991) +- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005) +- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. 
(#2005) +- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009) + +### Changed + +- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. + `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) +- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) +- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) +- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) +- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) +- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) +- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) +- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. + This method returns the status of a span using the new `Status` type. (#1874) +- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. + This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) +- Unembed `SpanContext` in `Link`. (#1877) +- Generate Semantic conventions from the specification YAML. (#1891) +- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) +- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902) +- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) +- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config.) (#1921) +- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) +- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Refactored option types according to the contribution style guide. (#1882) +- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. + This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. + The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) +- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. 
(#1931) +- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) +- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931) +- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) +- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) + +### Deprecated + +- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993) + +### Removed + +- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) +- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. (#1810) +- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. + The `Tracer` method is not a required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. + The `IsRecording` method returns if the span is recording or not. + A read-only span value does not need to know if updates to it will be recorded or not. + By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873) +- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. + The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. + When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873) +- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. 
+ Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. + The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900) + - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009) +- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919) +- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931) +- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. + Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967) +- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. + These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) +- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) +- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) + +### Fixed + +- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) +- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) +- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) +- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) +- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) +- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) +- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) + +### Security + +## [0.20.0] - 2021-04-23 + +### Added + +- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) +- Adds semantic conventions for exceptions. (#1492) +- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` + These environment variables can be used to override Jaeger agent hostname and port (#1752) +- Option `ExportTimeout` was added to batch span processor. (#1755) +- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) +- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. 
(#1771) +- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) +- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) +- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) +- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) +- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) + - `process.pid` + - `process.executable.name` + - `process.executable.path` + - `process.command_args` + - `process.owner` + - `process.runtime.name` + - `process.runtime.version` + - `process.runtime.description` +- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) +- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811) + - `OTEL_EXPORTER_OTLP_ENDPOINT` + - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` + - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` + - `OTEL_EXPORTER_OTLP_HEADERS` + - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` + - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` + - `OTEL_EXPORTER_OTLP_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` + - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TIMEOUT` + - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` + - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` + - `OTEL_EXPORTER_OTLP_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` +- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) +- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) + +### Fixed + +- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) +- The Jaeger exporter now correctly sets tags for the Span status code and message. + This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) +- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. + Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) +- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) +- Fixed typo for default service name in Jaeger Exporter. (#1797) +- Fix flaky OTLP for the reconnnection of the client connection. (#1527, #1814) +- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. + Instead, the exporter now splits the batch into smaller sendable batches. (#1828) + +### Changed + +- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) +- Jaeger exporter was updated to use thrift v0.14.1. (#1712) +- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) +- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. 
(#1713) +- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. + The Span's SpanContext can now self-identify as being remote or not. + This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) +- Improve OTLP/gRPC exporter connection errors. (#1737) +- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. + The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) +- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. + This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) +- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` + to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) +- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) +- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. + It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) +- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) +- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) +- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) +- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) +- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) +- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. + This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) +- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) +- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. + The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) +- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830) + +### Removed + +- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` + These environment variables will no longer be used to override values of the Jaeger exporter (#1752) +- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. 
+ This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. + To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) +- Setting error status while recording error with Span from oteltest package. (#1729) +- The concept of a remote and local Span stored in a context is unified to just the current Span. + Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) +- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. + This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) +- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) +- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. + The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) +- Remove the `WithDisabled` option from the Jaeger exporter. + To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) +- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. + These functions for retrieving specific environment variable values are redundant of other internal functions and + are not intended for end user use. (#1824) +- Removed the Jaeger exporter `WithSDKOptions` `Option`. + This option was used to set SDK options for the exporter creation convenience functions. + These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. + If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) +- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. + The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter `Option` type is removed. + The type is no longer used by the exporter to configure anything. + All the previous configurations these options provided were duplicates of SDK configuration. + They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) + +## [0.19.0] - 2021-03-18 + +### Added + +- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) +- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) +- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) +- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) +- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. 
(#1703) + +### Changed + +- `trace.SpanContext` is now immutable and has no exported fields. (#1573) + - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. +- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) +- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608) +- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) +- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) +- Added non-empty string check for trace `Attribute` keys. (#1659) +- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) +- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) +- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) +- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) + +### Removed + +- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) +- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) +- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. + These are now returned as a SpanProcessor interface from their respective constructors. (#1638) +- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) +- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) +- Removed `jaeger.WithProcess` configuration option. (#1673) +- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) + +### Fixed + +- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626) +- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) +- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) +- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) +- Synchronization issues in global trace delegate implementation. (#1686) +- Reduced excess memory usage by global `TracerProvider`. (#1687) + +## [0.18.0] - 2021-03-03 + +### Added + +- Added `resource.Default()` for use with meter and tracer providers. (#1507) +- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) +- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) +- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) +- Compatibility testing suite in the CI system for the following systems. 
(#1567) + | OS | Go Version | Architecture | + | ------- | ---------- | ------------ | + | Ubuntu | 1.15 | amd64 | + | Ubuntu | 1.14 | amd64 | + | Ubuntu | 1.15 | 386 | + | Ubuntu | 1.14 | 386 | + | MacOS | 1.15 | amd64 | + | MacOS | 1.14 | amd64 | + | Windows | 1.15 | amd64 | + | Windows | 1.14 | amd64 | + | Windows | 1.15 | 386 | + | Windows | 1.14 | 386 | + +### Changed + +- Replaced interface `oteltest.SpanRecorder` with its existing implementation + `StandardSpanRecorder`. (#1542) +- Default span limit values to 128. (#1535) +- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) +- Renamed the `otel/label` package to `otel/attribute`. (#1541) +- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) +- Parallelize the CI linting and testing. (#1567) +- Stagger timestamps in exact aggregator tests. (#1569) +- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) +- Prevent end-users from implementing some interfaces (#1575) + + ``` + "otel/exporters/otlp/otlphttp".Option + "otel/exporters/stdout".Option + "otel/oteltest".Option + "otel/trace".TracerOption + "otel/trace".SpanOption + "otel/trace".EventOption + "otel/trace".LifeCycleOption + "otel/trace".InstrumentationOption + "otel/sdk/resource".Option + "otel/sdk/trace".ParentBasedSamplerOption + "otel/sdk/trace".ReadOnlySpan + "otel/sdk/trace".ReadWriteSpan + ``` + +### Removed + +- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) +- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) +- Removed the `test-386` make target. + This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) + +### Fixed + +- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572) +- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577) +- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579) +- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581) +- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) + +## [0.17.0] - 2021-02-12 + +### Changed + +- Rename project default branch from `master` to `main`. (#1505) +- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) +- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) +- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) +- Move metric-related public global APIs from otel to otel/metric/global. (#1528) + +## Fixed + +- Fixed otlpgrpc reconnection issue. +- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513) +- The otel-collector example now uses the default OTLP receiver port of the collector. + +## [0.16.0] - 2021-01-13 + +### Added + +- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. 
(#1360) +- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) +- Added documentation about the project's versioning policy. (#1388) +- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) +- Added codeql worfklow to GitHub Actions (#1428) +- Added Gosec workflow to GitHub Actions (#1429) +- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) +- Add an OpenCensus exporter bridge. (#1444) + +### Changed + +- Rename `internal/testing` to `internal/internaltest`. (#1449) +- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) +- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) +- Improve span duration accuracy. (#1360) +- Migrated CI/CD from CircleCI to GitHub Actions (#1382) +- Remove duplicate checkout from GitHub Actions workflow (#1407) +- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) +- Metric `exact` aggregator includes per-point timestamps (#1412) +- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) +- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) +- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) +- Unify endpoint API that related to OTel exporter. (#1401) +- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) +- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) +- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) +- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) +- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) +- Metric Push and Pull Controller components are combined into a single "basic" Controller: + - `WithExporter()` and `Start()` to configure Push behavior + - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior + - `Start()` and `Stop()` accept Context. (#1378) +- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) + +### Removed + +- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360) +- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) +- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) + +### Fixed + +- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) + ## [0.15.0] - 2020-12-10 ### Added @@ -23,7 +586,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) - Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) - ### Fixed - Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. 
(#1381) @@ -35,6 +597,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) - A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) - `SpanContextFromContext` returns `SpanContext` from context. (#1255) +- `TraceState` has been added to `SpanContext`. (#1340) - `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323) - Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) - Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) @@ -80,6 +643,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. `Tracer` and `Span` from the same module should be used in their place instead. (#1306) - `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) ### Fixed @@ -87,7 +651,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) - Fix condition in `label.Any`. (#1299) - Fix global `TracerProvider` to pass options to its configured provider. (#1329) -- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) +- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) ## [0.13.0] - 2020-10-08 @@ -508,7 +1072,7 @@ This release implements the v0.5.0 version of the OpenTelemetry specification. - Rename `Observer` instrument to `ValueObserver`. (#734) - The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) - Replace `Measure` instrument by `ValueRecorder` instrument. (#732) -- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 727) +- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) ### Fixed @@ -611,7 +1175,6 @@ This release implements the v0.5.0 version of the OpenTelemetry specification. - Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 - The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) - ## [0.4.2] - 2020-03-31 ### Fixed @@ -671,7 +1234,7 @@ There is still a possibility of breaking changes. - Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) - The zipkin trace exporter. (#495) - The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) -- The `StatusMessage` field was add to the trace `Span`. (#524) +- Add `StatusMessage` field to the trace `Span`. (#524) - Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) - The `Resource` type was added to the SDK. 
(#528) - The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) @@ -764,14 +1327,12 @@ There is still a possibility of breaking changes. - `AlwaysParentSample` sampler to the trace API. (#455) - `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) - ### Changed - Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) - Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) - Move correlation context propagation to correlation package. (#479) - Do not default to putting remote span context into links. (#480) -- Propagators extrac - `Tracer.WithSpan` updated to accept `StartOptions`. (#472) - Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) - Renamed the `export` package to `metric` to match directory structure. (#432) @@ -919,7 +1480,6 @@ There is still a possibility of breaking changes. This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. This was corrected. (#333) - ## [0.1.2] - 2019-11-18 ### Fixed @@ -979,8 +1539,17 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. - -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.15.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.0.0...HEAD +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 [0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 [0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 [0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/CODEOWNERS b/src/runtime/vendor/go.opentelemetry.io/otel/CODEOWNERS index b2e99fc38..376de4b97 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/src/runtime/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/master/community-membership.md +# https://github.com/open-telemetry/community/blob/main/community-membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @jmacd @lizthegrey @MrAlias @Aneurysm9 @evantorrie @XSAM +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared CODEOWNERS @MrAlias @Aneurysm9 diff --git 
a/src/runtime/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/src/runtime/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 85d9a0987..c900e2a73 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/src/runtime/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -9,17 +9,17 @@ See the [public meeting notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) for a summary description of past meetings. To request edit access, join the meeting or get in touch on -[Gitter](https://gitter.im/open-telemetry/opentelemetry-go). +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). ## Development You can view and edit the source code by cloning this repository: -```bash +```sh git clone https://github.com/open-telemetry/opentelemetry-go.git ``` -Run `make test` to run the tests instead of `go test`. +Run `make test` to run the tests instead of `go test`. There are some generated files checked into the repo. To make sure that the generated files are up-to-date, run `make` (or `make @@ -43,7 +43,7 @@ To create a new PR, fork the project in GitHub and clone the upstream repo: ```sh -$ go get -d go.opentelemetry.io/otel +go get -d go.opentelemetry.io/otel ``` (This may print some warning about "build constraints exclude all Go @@ -53,7 +53,7 @@ This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You can alternatively use `git` directly with: ```sh -$ git clone https://github.com/open-telemetry/opentelemetry-go +git clone https://github.com/open-telemetry/opentelemetry-go ``` (Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - @@ -66,20 +66,20 @@ current working directory. Enter the newly created directory and add your fork as a new remote: ```sh -$ git remote add git@github.com:/opentelemetry-go +git remote add git@github.com:/opentelemetry-go ``` Check out a new branch, make modifications, run linters and tests, update `CHANGELOG.md`, and push the branch to your fork: ```sh -$ git checkout -b +git checkout -b # edit files # update changelog -$ make precommit -$ git add -p -$ git commit -$ git push +make precommit +git add -p +git commit +git push ``` Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull @@ -100,13 +100,20 @@ A PR is considered to be **ready to merge** when: different companies). This is not enforced through technical means and a PR may be **ready to merge** with a single approval if the change and its approach have been discussed and consensus reached. -* Major feedbacks are resolved. +* Feedback has been addressed. +* Any substantive changes to your PR will require that you clear any prior + Approval reviews, this includes changes resulting from other feedback. Unless + the approver explicitly stated that their approval will persist across + changes it should be assumed that the PR needs their review again. Other + project members (e.g. approvers, maintainers) can help with this if there are + any questions or if you forget to clear reviews. * It has been open for review for at least one working day. This gives people reasonable time to review. * Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for one day and may be merged with a single Maintainer's approval. * `CHANGELOG.md` has been updated to reflect what has been added, changed, removed, or fixed. +* `README.md` has been updated if necessary. * Urgent fix can take exception as long as it has been actively communicated. 
@@ -118,7 +125,7 @@ As with other OpenTelemetry clients, opentelemetry-go follows the [opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). It's especially valuable to read through the [library -guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md). +guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). ### Focus on Capabilities, Not Structure Compliance @@ -134,8 +141,28 @@ It is preferable to have contributions follow the idioms of the language rather than conform to specific API names or argument patterns in the spec. -For a deeper discussion, see: -https://github.com/open-telemetry/opentelemetry-specification/issues/165 +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). + +## Documentation + +Each non-example Go Module should have its own `README.md` containing: + +- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). +- Brief description. +- Installation instructions (and requirements if applicable). +- Hyperlink to an example. Depending on the component the example can be: + - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). + - A sample Go application with its own `README.md`, like [here](example/zipkin). +- Additional documentation sections such us: + - Configuration, + - Contributing, + - References. + +[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. + +Moreover, it should be possible to navigate to any `README.md` from the +root `README.md`. ## Style Guide @@ -160,32 +187,40 @@ to the reasons why. ### Configuration -When creating an instantiation function for a complex `struct` it is useful -to allow variable number of options to be applied. However, the strong type -system of Go restricts the function design options. There are a few ways to -solve this problem, but we have landed on the following design. +When creating an instantiation function for a complex `type T struct`, it is +useful to allow variable number of options to be applied. However, the strong +type system of Go restricts the function design options. There are a few ways +to solve this problem, but we have landed on the following design. #### `config` Configuration should be held in a `struct` named `config`, or prefixed with specific type name this Configuration applies to if there are multiple -`config` in the package. This `struct` must contain configuration options. +`config` in the package. This type must contain configuration options. ```go // config contains configuration options for a thing. type config struct { - // options ... + // options ... } ``` -In general the `config` `struct` will not need to be used externally to the +In general the `config` type will not need to be used externally to the package and should be unexported. If, however, it is expected that the user will likely want to build custom options for the configuration, the `config` should be exported. Please, include in the documentation for the `config` how the user can extend the configuration. -It is important that `config` are not shared across package boundaries. -Meaning a `config` from one package should not be directly used by another. +It is important that internal `config` are not shared across package boundaries. +Meaning a `config` from one package should not be directly used by another. 
The +one exception is the API packages. The configs from the base API, eg. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK therefor it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility, to achieve this no fields should be exported but should +instead be accessed by methods. Optionally, it is common to include a `newConfig` function (with the same naming scheme). This function wraps any defaults setting and looping over @@ -194,13 +229,13 @@ all options to create a configured `config`. ```go // newConfig returns an appropriately configured config. func newConfig([]Option) config { - // Set default values for config. - config := config{/* […] */} - for _, option := range options { - option.Apply(&config) - } - // Preform any validation here. - return config + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + option.apply(&config) + } + // Preform any validation here. + return config } ``` @@ -218,10 +253,14 @@ To set the value of the options a `config` contains, a corresponding ```go type Option interface { - Apply(*config) + apply(*config) } ``` +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on its own. + The name of the interface should be prefixed in the same way the corresponding `config` is (if at all). @@ -244,53 +283,70 @@ func With*(…) Option { … } ```go type defaultFalseOption bool -func (o defaultFalseOption) Apply(c *config) { - c.Bool = bool(o) +func (o defaultFalseOption) apply(c *config) { + c.Bool = bool(o) } -// WithOption sets a T* to have an option included. +// WithOption sets a T to have an option included. func WithOption() Option { - return defaultFalseOption(true) + return defaultFalseOption(true) } ``` ```go type defaultTrueOption bool -func (o defaultTrueOption) Apply(c *config) { - c.Bool = bool(o) +func (o defaultTrueOption) apply(c *config) { + c.Bool = bool(o) } -// WithoutOption sets a T* to have Bool option excluded. +// WithoutOption sets a T to have Bool option excluded. func WithoutOption() Option { - return defaultTrueOption(false) + return defaultTrueOption(false) } -```` +``` ##### Declared Type Options ```go type myTypeOption struct { - MyType MyType + MyType MyType +} + +func (o myTypeOption) apply(c *config) { + c.MyType = o.MyType } -func (o myTypeOption) Apply(c *config) { - c.MyType = o.MyType +// WithMyType sets T to have include MyType. +func WithMyType(t MyType) Option { + return myTypeOption{t} } +``` + +##### Functional Options -// WithMyType sets T* to have include MyType. +```go +type optionFunc func(*config) + +func (fn optionFunc) apply(c *config) { + fn(c) +} + +// WithMyType sets t as MyType. func WithMyType(t MyType) Option { - return myTypeOption{t} + return optionFunc(func(c *config) { + c.MyType = t + }) } ``` #### Instantiation -Using this configuration pattern to configure instantiation with a `New*` +Using this configuration pattern to configure instantiation with a `NewT` function. ```go -func NewT*(options ...Option) T* {…} +func NewT(options ...Option) T {…} ``` Any required parameters can be declared before the variadic `options`. @@ -314,12 +370,12 @@ type config struct { // DogOption apply Dog specific options. 
type DogOption interface { - ApplyDog(*config) + applyDog(*config) } // BirdOption apply Bird specific options. type BirdOption interface { - ApplyBird(*config) + applyBird(*config) } // Option apply options for all animals. @@ -329,23 +385,23 @@ type Option interface { } type weightOption float64 -func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) } -func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) } +func (o weightOption) applyDog(c *config) { c.Weight = float64(o) } +func (o weightOption) applyBird(c *config) { c.Weight = float64(o) } func WithWeight(w float64) Option { return weightOption(w) } type furColorOption string -func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) } +func (o furColorOption) applyDog(c *config) { c.Color = string(o) } func WithFurColor(c string) DogOption { return furColorOption(c) } type maxAltitudeOption float64 -func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) } +func (o maxAltitudeOption) applyBird(c *config) { c.MaxAltitude = float64(o) } func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } func NewDog(name string, o ...DogOption) Dog {…} func NewBird(name string, o ...BirdOption) Bird {…} ``` -### Interface Type +### Interfaces To allow other developers to better comprehend the code, it is important to ensure it is sufficiently documented. One simple measure that contributes @@ -353,21 +409,87 @@ to this aim is self-documenting by naming method parameters. Therefore, where appropriate, methods of every exported interface type should have their parameters appropriately named. +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +Otherwise, stable interfaces MUST NOT be modified. + +If new functionality is needed for an interface that cannot be changed, it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface. For example, if you wanted to add a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. E.g. + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +type ClosingExporter interface { + Exporter + Close() +} +``` + +This new type can be used similarly to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called. + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need its own super-set +interface and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred.
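For reference, the interface-extension pattern described above can be exercised end to end. The sketch below is illustrative only: the `Exporter`, `Closer`, and `ClosingExporter` names and their no-argument signatures mirror the simplified example in this guide, not the real SDK exporter API (which takes a `context.Context` and a batch of spans and returns an `error`), and `stdoutExporter` is a hypothetical stand-in implementation.

```go
package main

import "fmt"

// Exporter is the original, stable interface from the example above.
type Exporter interface {
	Export()
}

// Closer is the small, targeted interface that adds the new capability.
type Closer interface {
	Close()
}

// ClosingExporter is the super-set form: it embeds Exporter and adds Close.
type ClosingExporter interface {
	Exporter
	Close()
}

// stdoutExporter is a toy implementation used only for this sketch.
type stdoutExporter struct{}

func (stdoutExporter) Export() { fmt.Println("exporting") }
func (stdoutExporter) Close()  { fmt.Println("closing") }

// Compile-time check: stdoutExporter also satisfies the super-set interface.
var _ ClosingExporter = stdoutExporter{}

// caller depends only on the stable Exporter interface, but uses the new
// capability when the passed value happens to provide it.
func caller(e Exporter) {
	e.Export()
	if c, ok := e.(Closer); ok {
		c.Close()
	}
}

func main() {
	caller(stdoutExporter{}) // prints "exporting" then "closing"
}
```

The point of the pattern is that existing `Exporter` implementations keep compiling unchanged; the new behavior is picked up opportunistically at the call site via the type assertion rather than by modifying the stable interface.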
+ ## Approvers and Maintainers Approvers: -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Evan Torrie](https://github.com/evantorrie), Verizon Media - [Josh MacDonald](https://github.com/jmacd), LightStep - [Sam Xie](https://github.com/XSAM) +- [David Ashpole](https://github.com/dashpole), Google +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Robert Pająk](https://github.com/pellared), Splunk Maintainers: -- [Anthony Mirabella](https://github.com/Aneurysm9), Centene -- [Tyler Yahn](https://github.com/MrAlias), New Relic +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Tyler Yahn](https://github.com/MrAlias), Splunk ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/master/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/Makefile b/src/runtime/vendor/go.opentelemetry.io/otel/Makefile index 130be713a..d5f52c638 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/Makefile +++ b/src/runtime/vendor/go.opentelemetry.io/otel/Makefile @@ -21,141 +21,138 @@ ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example' | sort)) $(shell find ./example -type f -name 'go.mod' -exec dirname {} \; | sort) ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) -# Mac OS Catalina 10.5.x doesn't support 386. Hence skip 386 test -SKIP_386_TEST = false -UNAME_S := $(shell uname -s) -ifeq ($(UNAME_S),Darwin) - SW_VERS := $(shell sw_vers -productVersion) - ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS)) - SKIP_386_TEST = true - endif -endif - -GOTEST_MIN = go test -timeout 30s -GOTEST = $(GOTEST_MIN) -race -GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./... 
+GO = go +TIMEOUT = 60 .DEFAULT_GOAL := precommit -.PHONY: precommit +.PHONY: precommit ci +precommit: dependabot-check license-check lint build examples test-default +ci: precommit check-clean-work-tree test-coverage -TOOLS_DIR := $(abspath ./.tools) +# Tools -$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go - cd $(TOOLS_MOD_DIR) && \ - go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint +TOOLS = $(CURDIR)/.tools -$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ - go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell + $(GO) build -o $@ $(PACKAGE) -$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go - cd $(TOOLS_MOD_DIR) && \ - go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen -$(TOOLS_DIR)/gojq: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go - cd $(TOOLS_MOD_DIR) && \ - go build -o $(TOOLS_DIR)/gojq github.com/itchyny/gojq/cmd/gojq +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink -precommit: dependabot-check license-check generate build lint examples test-benchmarks test +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint -.PHONY: test-with-coverage -test-with-coverage: - set -e; \ - printf "" > coverage.txt; \ - for dir in $(ALL_COVERAGE_MOD_DIRS); do \ - echo "go test ./... + coverage in $${dir}"; \ - (cd "$${dir}" && \ - $(GOTEST_WITH_COVERAGE) ./... && \ - go tool cover -html=coverage.out -o coverage.html); \ - [ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \ - done; \ - sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE= github.com/client9/misspell/cmd/misspell +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE= github.com/wadey/gocovmerge -.PHONY: ci -ci: precommit check-clean-work-tree test-with-coverage test-386 +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer -.PHONY: check-clean-work-tree -check-clean-work-tree: - @if ! git diff --quiet; then \ - echo; \ - echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ - echo; \ - git status; \ - exit 1; \ - fi +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq -.PHONY: build -build: - # TODO: Fix this on windows. - set -e; for dir in $(ALL_GO_MOD_DIRS); do \ - echo "compiling all packages in $${dir}"; \ +.PHONY: tools +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(TOOLS)/gojq $(SEMCONVGEN) + + +# Build + +.PHONY: examples generate build +examples: + @set -e; for dir in $(EXAMPLES); do \ + echo "$(GO) build $${dir}/..."; \ (cd "$${dir}" && \ - go build ./... && \ - go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \ + $(GO) build .); \ done -.PHONY: test -test: +generate: $(STRINGER) set -e; for dir in $(ALL_GO_MOD_DIRS); do \ - echo "go test ./... 
+ race in $${dir}"; \ + echo "$(GO) generate $${dir}/..."; \ (cd "$${dir}" && \ - $(GOTEST) ./...); \ + PATH="$(TOOLS):$${PATH}" $(GO) generate ./...); \ done -.PHONY: test-386 -test-386: - if [ $(SKIP_386_TEST) = true ] ; then \ - echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \ - else \ - set -e; for dir in $(ALL_GO_MOD_DIRS); do \ - echo "go test ./... GOARCH 386 in $${dir}"; \ - (cd "$${dir}" && \ - GOARCH=386 $(GOTEST_MIN) ./...); \ - done; \ - fi - -.PHONY: examples -examples: - @set -e; for ex in $(EXAMPLES); do \ - echo "Building $${ex}"; \ - (cd "$${ex}" && \ - go build .); \ +build: generate + # Build all package code including testing code. + set -e; for dir in $(ALL_GO_MOD_DIRS); do \ + echo "$(GO) build $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) build ./... && \ + $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null); \ done -.PHONY: test-benchmarks -test-benchmarks: +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default: ARGS=-v -race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v +test-race: ARGS=-race +$(TEST_TARGETS): test +test: @set -e; for dir in $(ALL_GO_MOD_DIRS); do \ - echo "test benchmarks in $${dir}"; \ - (cd "$${dir}" && go test -test.benchtime=1ms -run=NONE -bench=. ./...) > /dev/null; \ + echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)); \ done +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + .PHONY: lint -lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell +lint: misspell lint-modules | $(GOLANGCI_LINT) set -e; for dir in $(ALL_GO_MOD_DIRS); do \ echo "golangci-lint in $${dir}"; \ (cd "$${dir}" && \ - $(TOOLS_DIR)/golangci-lint run --fix && \ - $(TOOLS_DIR)/golangci-lint run); \ - done - $(TOOLS_DIR)/misspell -w $(ALL_DOCS) - set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \ - echo "go mod tidy in $${dir}"; \ - (cd "$${dir}" && \ - go mod tidy); \ + $(GOLANGCI_LINT) run --fix && \ + $(GOLANGCI_LINT) run); \ done -generate: $(TOOLS_DIR)/stringer - set -e; for dir in $(ALL_GO_MOD_DIRS); do \ - echo "running generators in $${dir}"; \ +.PHONY: misspell +misspell: | $(MISSPELL) + $(MISSPELL) -w $(ALL_DOCS) + +.PHONY: lint-modules +lint-modules: | $(CROSSLINK) + set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \ + echo "$(GO) mod tidy in $${dir}"; \ (cd "$${dir}" && \ - PATH="$(TOOLS_DIR):$${PATH}" go generate ./...); \ + $(GO) mod tidy); \ done + echo "cross-linking all go modules" + $(CROSSLINK) .PHONY: license-check license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! 
-path './exporters/otlp/internal/opentelemetry-proto/*') ; do \ + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ done); \ if [ -n "$${licRes}" ]; then \ @@ -166,12 +163,23 @@ license-check: .PHONY: dependabot-check dependabot-check: @result=$$( \ - for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \ - do grep -q "$$f" .github/dependabot.yml \ + for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \ + do grep -q "directory: \+$$f" .github/dependabot.yml \ || echo "$$f"; \ done; \ ); \ if [ -n "$$result" ]; then \ echo "missing go.mod dependabot check:"; echo "$$result"; \ + echo "new modules need to be added to the .github/dependabot.yml file"; \ exit 1; \ fi + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/Makefile.proto b/src/runtime/vendor/go.opentelemetry.io/otel/Makefile.proto deleted file mode 100644 index 5e51af3a5..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/Makefile.proto +++ /dev/null @@ -1,129 +0,0 @@ -# -*- mode: makefile; -*- -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This Makefile.proto has rules to generate go code for otlp -# exporter. It does it by copying the proto files from -# `exporters/otlp/internal/opentelemetry-proto` (which is a -# submodule that needs to be checked out) into `gen/proto`, changing -# the go_package option to a valid string, generating the go files and -# finally copying the files into the module. The files are not -# generated in place, because protoc generates a too-deep directory -# structure. -# -# Currently, all the generated code is in -# `exporters/otlp/internal/opentelemetry-proto-gen`. -# -# Prereqs: wget (for downloading the zip file with protoc binary), -# unzip (for unpacking the archive), rsync (for copying back the -# generated files). 
- -PROTOC_VERSION := 3.14.0 - -TOOLS_DIR := $(abspath ./.tools) -TOOLS_MOD_DIR := ./internal/tools -PROTOBUF_VERSION := v1 -OTEL_PROTO_SUBMODULE := exporters/otlp/internal/opentelemetry-proto -GEN_TEMP_DIR := gen -SUBMODULE_PROTO_FILES := $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/*/$(PROTOBUF_VERSION)/*.proto) $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/collector/*/$(PROTOBUF_VERSION)/*.proto) - -ifeq ($(strip $(SUBMODULE_PROTO_FILES)),) -$(error Submodule at $(OTEL_PROTO_SUBMODULE) is not checked out, use "git submodule update --init") -endif - -PROTOBUF_GEN_DIR := exporters/otlp/internal/opentelemetry-proto-gen -PROTOBUF_TEMP_DIR := $(GEN_TEMP_DIR)/pb-go -PROTO_SOURCE_DIR := $(GEN_TEMP_DIR)/proto -SOURCE_PROTO_FILES := $(subst $(OTEL_PROTO_SUBMODULE),$(PROTO_SOURCE_DIR),$(SUBMODULE_PROTO_FILES)) - -.DEFAULT_GOAL := protobuf - -UNAME_S := $(shell uname -s) -UNAME_M := $(shell uname -m) - -ifeq ($(UNAME_S),Linux) - -PROTOC_OS := linux -PROTOC_ARCH := $(UNAME_M) - -else ifeq ($(UNAME_S),Darwin) - -PROTOC_OS := osx -PROTOC_ARCH := x86_64 - -endif - -PROTOC_ZIP_URL := https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS)-$(PROTOC_ARCH).zip - -$(TOOLS_DIR)/PROTOC_$(PROTOC_VERSION): - @rm -f "$(TOOLS_DIR)"/PROTOC_* && \ - touch "$@" - -# Depend on a versioned file (like PROTOC_3.14.0), so when version -# gets bumped, we will depend on a nonexistent file and thus download -# a newer version. -$(TOOLS_DIR)/protoc/bin/protoc: $(TOOLS_DIR)/PROTOC_$(PROTOC_VERSION) - echo "Fetching protoc $(PROTOC_VERSION)" && \ - rm -rf $(TOOLS_DIR)/protoc && \ - wget -O $(TOOLS_DIR)/protoc.zip $(PROTOC_ZIP_URL) && \ - unzip $(TOOLS_DIR)/protoc.zip -d $(TOOLS_DIR)/protoc-tmp && \ - rm $(TOOLS_DIR)/protoc.zip && \ - touch $(TOOLS_DIR)/protoc-tmp/bin/protoc && \ - mv $(TOOLS_DIR)/protoc-tmp $(TOOLS_DIR)/protoc - -$(TOOLS_DIR)/protoc-gen-gogofast: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go - cd $(TOOLS_MOD_DIR) && \ - go build -o $(TOOLS_DIR)/protoc-gen-gogofast github.com/gogo/protobuf/protoc-gen-gogofast && \ - go mod tidy - -# Return a sed expression for replacing the go_package option in proto -# file with a one that's valid for us. -# -# Example: $(call get-sed-expr,$(PROTOBUF_GEN_DIR)) -define get-sed-expr -'s,go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go,go_package = "go.opentelemetry.io/otel/$(1),' -endef - -.PHONY: protobuf -protobuf: protobuf-source gen-protobuf copy-protobufs - -.PHONY: protobuf-source -protobuf-source: $(SOURCE_PROTO_FILES) - -# This copies proto files from submodule into $(PROTO_SOURCE_DIR), -# thus satisfying the $(SOURCE_PROTO_FILES) prerequisite. The copies -# have their package name replaced by go.opentelemetry.io/otel. 
-$(PROTO_SOURCE_DIR)/%.proto: $(OTEL_PROTO_SUBMODULE)/%.proto - @ \ - mkdir -p $(@D); \ - sed -e $(call get-sed-expr,$(PROTOBUF_GEN_DIR)) "$<" >"$@.tmp"; \ - mv "$@.tmp" "$@" - -.PHONY: gen-protobuf -gen-protobuf: $(SOURCE_PROTO_FILES) $(TOOLS_DIR)/protoc-gen-gogofast $(TOOLS_DIR)/protoc/bin/protoc - @ \ - mkdir -p "$(PROTOBUF_TEMP_DIR)"; \ - set -e; for f in $^; do \ - if [[ "$${f}" == $(TOOLS_DIR)/* ]]; then continue; fi; \ - echo "protoc $${f#"$(PROTO_SOURCE_DIR)/"}"; \ - PATH="$(TOOLS_DIR):$${PATH}" $(TOOLS_DIR)/protoc/bin/protoc --proto_path="$(PROTO_SOURCE_DIR)" --gogofast_out="plugins=grpc:$(PROTOBUF_TEMP_DIR)" "$${f}"; \ - done - -.PHONY: copy-protobufs -copy-protobufs: - @rsync -a $(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/exporters . - -.PHONY: clean -clean: - rm -rf $(GEN_TEMP_DIR) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/README.md b/src/runtime/vendor/go.opentelemetry.io/otel/README.md index 3b3fc8773..d90db542f 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/README.md +++ b/src/runtime/vendor/go.opentelemetry.io/otel/README.md @@ -1,30 +1,57 @@ # OpenTelemetry-Go -[![Circle CI](https://circleci.com/gh/open-telemetry/opentelemetry-go.svg?style=svg)](https://circleci.com/gh/open-telemetry/opentelemetry-go) +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) -[![Gitter](https://badges.gitter.im/open-telemetry/opentelemetry-go.svg)](https://gitter.im/open-telemetry/opentelemetry-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) -The Go [OpenTelemetry](https://opentelemetry.io/) implementation. +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. ## Project Status -**Warning**: this project is currently in a pre-GA phase. Backwards -incompatible changes may be introduced in subsequent minor version releases as -we work to track the evolving OpenTelemetry specification and user feedback. +| Signal | Status | Project | +| ------- | ---------- | ------- | +| Traces | Stable | N/A | +| Metrics | Alpha | N/A | +| Logs | Frozen [1] | N/A | -Our progress towards a GA release candidate is tracked in [this project -board](https://github.com/orgs/open-telemetry/projects/5). This release -candidate will follow semantic versioning and will be released with a major -version greater than zero. +- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. + No Logs Pull Requests are currently being accepted. Progress and status specific to this repository is tracked in our local [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) and [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). 
+Project versioning information and stability guarantees can be found in the +[versioning documentation](./VERSIONING.md). + +### Compatibility + +This project is tested on the following systems. + +| OS | Go Version | Architecture | +| ------- | ---------- | ------------ | +| Ubuntu | 1.16 | amd64 | +| Ubuntu | 1.15 | amd64 | +| Ubuntu | 1.16 | 386 | +| Ubuntu | 1.15 | 386 | +| MacOS | 1.16 | amd64 | +| MacOS | 1.15 | amd64 | +| Windows | 1.16 | amd64 | +| Windows | 1.15 | amd64 | +| Windows | 1.16 | 386 | +| Windows | 1.15 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + ## Getting Started +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). + OpenTelemetry's goal is to provide a single set of APIs to capture distributed traces and metrics from your application and send them to an observability platform. This project allows you to do just that for applications written in @@ -37,7 +64,7 @@ To start capturing distributed traces and metric events from your application it first needs to be instrumented. The easiest way to do this is by using an instrumentation library for your code. Be sure to check out [the officially supported instrumentation -libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation). +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need @@ -51,15 +78,17 @@ practical uses of this process. Now that your application is instrumented to collect telemetry, it needs an export pipeline to send that telemetry to an observability platform. -You can find officially supported exporters [here](./exporters/) and in the -companion [contrib -repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/exporters/metric). -Additionally, there are many vendor specific or 3rd party exporters for -OpenTelemetry. These exporters are broken down by -[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby) -and -[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby) -support. +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). + +| Exporter | Metrics | Traces | +| :-----------------------------------: | :-----: | :----: | +| [Jaeger](./exporters/jaeger/) | | ✓ | +| [OTLP](./exporters/otlp/) | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | ✓ | + +Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters). 
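As a quick orientation for reviewers of this bump, the sketch below wires one of the exporters from the table above (Jaeger) into a tracer provider using the v1.0.0 APIs this patch vendors. It is illustrative only: the collector endpoint, tracer name, and span name are placeholders rather than values taken from this repository.

```go
// Hypothetical wiring of the v1.0.0 Jaeger exporter into a tracer provider.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/jaeger"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Export spans to a Jaeger collector; the endpoint URL is a placeholder.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://localhost:14268/api/traces"),
	))
	if err != nil {
		log.Fatal(err)
	}

	// Batch spans before export and register the provider globally.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp)
	defer func() { _ = tp.Shutdown(context.Background()) }()

	// Instrumented code can now obtain a tracer from the global provider.
	_, span := otel.Tracer("example").Start(context.Background(), "operation")
	span.End()
}
```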
## Contributing diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/RELEASING.md b/src/runtime/vendor/go.opentelemetry.io/otel/RELEASING.md index 0fba7f175..730436672 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/src/runtime/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -1,5 +1,37 @@ # Release Process +## Semantic Convention Generation + +If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new +semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility +installed by `make tools` that can be used to generate the a package with the name matching the specification +version number under the `semconv` package. This will ideally be done soon after the specification release is +tagged. Make sure that the specification repo contains a checkout of the the latest tagged release so that the +generated files match the released semantic conventions. + +There are currently two categories of semantic conventions that must be generated, `resource` and `trace`. + +``` +.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2 +.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2 +``` + +Using default values for all options other than `input` will result in using the `template.j2` template to +generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/`. + +There are several ancillary files that are not generated and should be copied into the new package from the +prior package, with updates made as appropriate to canonical import path statements and constant values. +These files include: + +* doc.go +* exception.go +* http(_test)?.go +* schema.go + +Uses of the previous schema version in this repository should be updated to use the newly generated version. +No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed` +pipeline if you like living on the edge. + ## Pre-Release Update go.mod for submodules to depend on the new release which will happen in the next step. @@ -13,7 +45,7 @@ Update go.mod for submodules to depend on the new release which will happen in t 2. Verify the changes. ``` - git diff master + git diff main ``` This should have changed the version for all modules to be ``. @@ -32,7 +64,6 @@ Update go.mod for submodules to depend on the new release which will happen in t 4. Push the changes to upstream and create a Pull Request on GitHub. Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. - ## Tag Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. @@ -44,7 +75,7 @@ Failure to do so will leave things in a broken state. It is critical you make sure the version you push upstream is correct. [Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). -1. Run the tag.sh script using the `` of the commit on the master branch for the merged Pull Request. +1. Run the tag.sh script using the `` of the commit on the main branch for the merged Pull Request. ``` ./tag.sh @@ -76,6 +107,13 @@ After releasing verify that examples build outside of the repository. The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. 
This ensures they build with the published release, not the local copy. -## Contrib Repository +## Post-Release + +### Contrib Repository + +Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. + +### Website Documentation -Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/master/RELEASING.md) that uses this release. +Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). +Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/VERSIONING.md b/src/runtime/vendor/go.opentelemetry.io/otel/VERSIONING.md new file mode 100644 index 000000000..412f1e362 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -0,0 +1,224 @@ +# Versioning + +This document describes the versioning policy for this repository. This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. 
+ * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + go.opentelemetry.io/contrib/instrumentation/host/v2`, `require + go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the + package import path (e.g., `import + "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there + is both a `/v2` and a `@v2.0.1` in that example. One way to think about + it is that the module name now includes the `/v2`, so include `/v2` + whenever you are using the module name). + * If a module is version `v0` or `v1`, do not include the major version + in either the module path or the import path. + * In addition to public APIs, telemetry produced by stable instrumentation + will remain stable and backwards compatible. This is to avoid breaking + alerts and dashboard. + * Modules will be used to encapsulate instrumentation, detectors, exporters, + propagators, and any other independent sets of related components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API and telemetry will + be versioned with a major version greater than `v0`. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * Stable contrib modules cannot depend on experimental modules from this + project. + * All stable contrib modules of the same major version with this project + will use the same entire version as this project. 
+ * Stable modules may be released with an incremented minor or patch + version even though that module's code has not been changed. Instead + the only change that will have been included is to have updated that + modules dependency on this project's stable APIs. + * When an experimental module in contrib becomes stable a new stable + module version will be released and will include this now stable + module. The new stable module version will be an increment of the minor + version number and will be applied to all existing stable contrib + modules, this project's modules, and the newly stable module being + released. + * Contrib modules will be kept up to date with this project's releases. + * Due to the dependency contrib modules will implicitly have on this + project's modules the release of stable contrib modules to match the + released version number will be staggered after this project's release. + There is no explicit time guarantee for how long after this projects + release the contrib release will be. Effort should be made to keep them + as close in time as possible. + * No additional stable release in this project can be made until the + contrib repository has a matching stable release. + * No release can be made in the contrib repository after this project's + stable release except for a stable release of the contrib repository. +* GitHub releases will be made for all releases. +* Go modules will be made available at Go package mirrors. + +## Example Versioning Lifecycle + +To better understand the implementation of the above policy the following +example is provided. This project is simplified to include only the following +modules and their versions: + +* `otel`: `v0.14.0` +* `otel/trace`: `v0.14.0` +* `otel/metric`: `v0.14.0` +* `otel/baggage`: `v0.14.0` +* `otel/sdk/trace`: `v0.14.0` +* `otel/sdk/metric`: `v0.14.0` + +These modules have been developed to a point where the `otel/trace`, +`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they +should be considered for a stable release. The `otel/metric` and +`otel/sdk/metric` are still under active development and the `otel` module +depends on both `otel/trace` and `otel/metric`. + +The `otel` package is refactored to remove its dependencies on `otel/metric` so +it can be released as stable as well. With that done the following release +candidates are made: + +* `otel`: `v1.0.0-RC1` +* `otel/trace`: `v1.0.0-RC1` +* `otel/baggage`: `v1.0.0-RC1` +* `otel/sdk/trace`: `v1.0.0-RC1` + +The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. + +A few minor issues are discovered in the `otel/trace` package. These issues are +resolved with some minor, but backwards incompatible, changes and are released +as a second release candidate: + +* `otel`: `v1.0.0-RC2` +* `otel/trace`: `v1.0.0-RC2` +* `otel/baggage`: `v1.0.0-RC2` +* `otel/sdk/trace`: `v1.0.0-RC2` + +Notice that all module version numbers are incremented to adhere to our +versioning policy. + +After these release candidates have been evaluated to satisfaction, they are +released as version `v1.0.0`. + +* `otel`: `v1.0.0` +* `otel/trace`: `v1.0.0` +* `otel/baggage`: `v1.0.0` +* `otel/sdk/trace`: `v1.0.0` + +Since both the `go` utility and the Go module system support [the semantic +versioning definition of +precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release +will correctly be interpreted as the successor to the previous release +candidates. + +Active development of this project continues. 
The `otel/metric` module now has +backwards incompatible changes to its API that need to be released and the +`otel/baggage` module has a minor bug fix that needs to be released. The +following release is made: + +* `otel`: `v1.0.1` +* `otel/trace`: `v1.0.1` +* `otel/metric`: `v0.15.0` +* `otel/baggage`: `v1.0.1` +* `otel/sdk/trace`: `v1.0.1` +* `otel/sdk/metric`: `v0.15.0` + +Notice that, again, all stable module versions are incremented in unison and +the `otel/sdk/metric` package, which depends on the `otel/metric` package, also +bumped its version. This bump of the `otel/sdk/metric` package makes sense +given their coupling, though it is not explicitly required by our versioning +policy. + +As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a +point where they should be evaluated for stability. The `otel` module is +reintegrated with the `otel/metric` package and the following release is made: + +* `otel`: `v1.1.0-RC1` +* `otel/trace`: `v1.1.0-RC1` +* `otel/metric`: `v1.1.0-RC1` +* `otel/baggage`: `v1.1.0-RC1` +* `otel/sdk/trace`: `v1.1.0-RC1` +* `otel/sdk/metric`: `v1.1.0-RC1` + +All the modules are evaluated and determined to a viable stable release. They +are then released as version `v1.1.0` (the minor version is incremented to +indicate the addition of new signal). + +* `otel`: `v1.1.0` +* `otel/trace`: `v1.1.0` +* `otel/metric`: `v1.1.0` +* `otel/baggage`: `v1.1.0` +* `otel/sdk/trace`: `v1.1.0` +* `otel/sdk/metric`: `v1.1.0` diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/trace_nongo11.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/doc.go similarity index 73% rename from src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/trace_nongo11.go rename to src/runtime/vendor/go.opentelemetry.io/otel/attribute/doc.go index d794329a7..dafe7424d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/trace_nongo11.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -12,14 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !go1.11 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - return ctx, func() {} -} +// Package attribute provides key and value attributes. +package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/encoder.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/encoder.go similarity index 98% rename from src/runtime/vendor/go.opentelemetry.io/otel/label/encoder.go rename to src/runtime/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 4620873df..8b940f78d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/encoder.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package label // import "go.opentelemetry.io/otel/label" +package attribute // import "go.opentelemetry.io/otel/attribute" import ( "bytes" @@ -28,7 +28,7 @@ type ( Encoder interface { // Encode returns the serialized encoding of the label // set using its Iterator. This result may be cached - // by a label.Set. + // by a attribute.Set. 
Encode(iterator Iterator) string // ID returns a value that is unique for each class of diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/iterator.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/iterator.go similarity index 96% rename from src/runtime/vendor/go.opentelemetry.io/otel/label/iterator.go rename to src/runtime/vendor/go.opentelemetry.io/otel/attribute/iterator.go index a968e7668..e03aabb62 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/iterator.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package label // import "go.opentelemetry.io/otel/label" +package attribute // import "go.opentelemetry.io/otel/attribute" // Iterator allows iterating over the set of labels in order, // sorted by key. @@ -55,7 +55,7 @@ func (i *Iterator) Attribute() KeyValue { return i.Label() } -// IndexedLabel returns current index and label. Must be called only +// IndexedLabel returns current index and attribute. Must be called only // after Next returns true. func (i *Iterator) IndexedLabel() (int, KeyValue) { return i.idx, i.Label() diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/attribute/key.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..0656a04e4 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). 
+func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. +func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/attribute/kv.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 000000000..8f5793385 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key != "" && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. 
+func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/set.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/set.go similarity index 91% rename from src/runtime/vendor/go.opentelemetry.io/otel/label/set.go rename to src/runtime/vendor/go.opentelemetry.io/otel/attribute/set.go index 2d2f00450..bd591894d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/set.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -12,13 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package label // import "go.opentelemetry.io/otel/label" +package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "reflect" "sort" - "sync" ) type ( @@ -35,10 +34,6 @@ type ( // 3. Correlation map (TODO) Set struct { equivalent Distinct - - lock sync.Mutex - encoders [maxConcurrentEncoders]EncoderID - encoded [maxConcurrentEncoders]string } // Distinct wraps a variable-size array of `KeyValue`, @@ -76,8 +71,6 @@ var ( } ) -const maxConcurrentEncoders = 3 - // EmptySet returns a reference to a Set with no elements. // // This is a convenience provided for optimized calling utility. @@ -182,51 +175,13 @@ func (l *Set) Equals(o *Set) bool { } // Encoded returns the encoded form of this set, according to -// `encoder`. The result will be cached in this `*Set`. +// `encoder`. func (l *Set) Encoded(encoder Encoder) string { if l == nil || encoder == nil { return "" } - id := encoder.ID() - if !id.Valid() { - // Invalid IDs are not cached. 
- return encoder.Encode(l.Iter()) - } - - var lookup *string - l.lock.Lock() - for idx := 0; idx < maxConcurrentEncoders; idx++ { - if l.encoders[idx] == id { - lookup = &l.encoded[idx] - break - } - } - l.lock.Unlock() - - if lookup != nil { - return *lookup - } - - r := encoder.Encode(l.Iter()) - - l.lock.Lock() - defer l.lock.Unlock() - - for idx := 0; idx < maxConcurrentEncoders; idx++ { - if l.encoders[idx] == id { - return l.encoded[idx] - } - if !l.encoders[idx].Valid() { - l.encoders[idx] = id - l.encoded[idx] = r - return r - } - } - - // TODO: This is a performance cliff. Find a way for this to - // generate a warning. - return r + return encoder.Encode(l.Iter()) } func empty() Set { @@ -246,7 +201,7 @@ func NewSet(kvs ...KeyValue) Set { return empty() } s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) - return s //nolint + return s } // NewSetWithSortable returns a new `Set`. See the documentation for @@ -259,7 +214,7 @@ func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { return empty() } s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) - return s //nolint + return s } // NewSetWithFiltered returns a new `Set`. See the documentation for @@ -295,7 +250,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // - allocating a `Set` for storing the return value of this // constructor. // -// The result maintains a cache of encoded labels, by label.EncoderID. +// The result maintains a cache of encoded labels, by attribute.EncoderID. // This value should not be copied after its first use. // // The second `[]KeyValue` return value is a list of labels that were diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/type_string.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/type_string.go similarity index 60% rename from src/runtime/vendor/go.opentelemetry.io/otel/label/type_string.go rename to src/runtime/vendor/go.opentelemetry.io/otel/attribute/type_string.go index 62afeb60a..e584b2477 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/type_string.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type=Type"; DO NOT EDIT. -package label +package attribute import "strconv" @@ -10,19 +10,18 @@ func _() { var x [1]struct{} _ = x[INVALID-0] _ = x[BOOL-1] - _ = x[INT32-2] - _ = x[INT64-3] - _ = x[UINT32-4] - _ = x[UINT64-5] - _ = x[FLOAT32-6] - _ = x[FLOAT64-7] - _ = x[STRING-8] - _ = x[ARRAY-9] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] } -const _Type_name = "INVALIDBOOLINT32INT64UINT32UINT64FLOAT32FLOAT64STRINGARRAY" +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" -var _Type_index = [...]uint8{0, 7, 11, 16, 21, 27, 33, 40, 47, 53, 58} +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { if i < 0 || i >= Type(len(_Type_index)-1) { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/attribute/value.go b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..545bea50c --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "strconv" + + "go.opentelemetry.io/otel/internal" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + cp := make([]bool, len(v)) + copy(cp, v) + return Value{ + vtype: BOOLSLICE, + slice: &cp, + } +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + cp := make([]int64, 0, len(v)) + for _, i := range v { + cp = append(cp, int64(i)) + } + return Value{ + vtype: INT64SLICE, + slice: &cp, + } +} + +// Int64Value creates an INT64 Value. +func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + cp := make([]int64, len(v)) + copy(cp, v) + return Value{ + vtype: INT64SLICE, + slice: &cp, + } +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + cp := make([]float64, len(v)) + copy(cp, v) + return Value{ + vtype: FLOAT64SLICE, + slice: &cp, + } +} + +// StringValue creates a STRING Value. +func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + cp := make([]string, len(v)) + copy(cp, v) + return Value{ + vtype: STRINGSLICE, + slice: &cp, + } +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. 
+func (v Value) AsBoolSlice() []bool { + if s, ok := v.slice.(*[]bool); ok { + return *s + } + return nil +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if s, ok := v.slice.(*[]int64); ok { + return *s + } + return nil +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if s, ok := v.slice.(*[]float64); ok { + return *s + } + return nil +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsStringSlice() []string { + if s, ok := v.slice.(*[]string); ok { + return *s + } + return nil +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.AsBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.AsInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.AsFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.AsStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. +func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(*(v.slice.(*[]bool))) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(*(v.slice.(*[]int64))) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(*(v.slice.(*[]float64))) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(*(v.slice.(*[]string))) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/src/runtime/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 000000000..8de2bc9df --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,509 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package baggage + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" + + keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` + valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` + keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` +) + +var ( + keyRe = regexp.MustCompile(`^` + keyDef + `$`) + valueRe = regexp.MustCompile(`^` + valueDef + `$`) + propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +} + +func NewKeyProperty(key string) (Property, error) { + p := Property{} + if !keyRe.MatchString(key) { + return p, fmt.Errorf("%w: %q", errInvalidKey, key) + } + p.key = key + return p, nil +} + +func NewKeyValueProperty(key, value string) (Property, error) { + p := Property{} + if !keyRe.MatchString(key) { + return p, fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return p, fmt.Errorf("%w: %q", errInvalidValue, value) + } + p.key = key + p.value = value + p.hasValue = true + return p, nil +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + p := Property{} + if property == "" { + return p, nil + } + + match := propertyRe.FindStringSubmatch(property) + if len(match) != 4 { + return p, fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + if match[1] != "" { + p.key = match[1] + } else { + p.key = match[2] + p.value = match[3] + p.hasValue = true + } + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. +func (p Property) validate() error { + errFunc := func(err error) error { + return fmt.Errorf("invalid property: %w", err) + } + + if !keyRe.MatchString(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } + if p.hasValue && !valueRe.MatchString(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } + if !p.hasValue && p.value != "" { + return errFunc(errors.New("inconsistent value")) + } + return nil +} + +// Key returns the Property key. +func (p Property) Key() string { + return p.key +} + +// Value returns the Property value. Additionally a boolean value is returned +// indicating if the returned value is the empty if the Property has a value +// that is empty or if the value is not set. +func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a string compliant with the W3C Baggage +// specification. 
+func (p Property) String() string { + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, len(p)) + for i, prop := range p { + props[i] = prop.String() + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties +} + +// NewMember returns a new Member from the passed arguments. An error is +// returned if the created Member would be invalid according to the W3C +// Baggage specification. +func NewMember(key, value string, props ...Property) (Member, error) { + m := Member{key: key, value: value, properties: properties(props).Copy()} + if err := m.validate(); err != nil { + return Member{}, err + } + + return m, nil +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return Member{}, fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var ( + key, value string + props properties + ) + + parts := strings.SplitN(member, propertyDelimiter, 2) + switch len(parts) { + case 2: + // Parse the member properties. + for _, pStr := range strings.Split(parts[1], propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return Member{}, err + } + props = append(props, p) + } + fallthrough + case 1: + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + kv := strings.SplitN(parts[0], keyValueDelimiter, 2) + if len(kv) != 2 { + return Member{}, fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." + key, value = strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) + if !keyRe.MatchString(key) { + return Member{}, fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return Member{}, fmt.Errorf("%w: %q", errInvalidValue, value) + } + default: + // This should never happen unless a developer has changed the string + // splitting somehow. 
Panic instead of failing silently and allowing + // the bug to slip past the CI checks. + panic("failed to parse baggage member") + } + + return Member{key: key, value: value, properties: props}, nil +} + +// validate ensures m conforms to the W3C Baggage specification, returning an +// error otherwise. +func (m Member) validate() error { + if !keyRe.MatchString(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !valueRe.MatchString(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. +func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a string compliant with the W3C Baggage +// specification. +func (m Member) String() string { + // A key is just an ASCII string, but a value is URL encoded UTF-8. + s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) + if len(m.properties) > 0 { + s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if the passed members +// are invalid according to the W3C Baggage specification or if it results in +// a Baggage exceeding limits set in that specification. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if err := m.validate(); err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplicating. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. 
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key the returned Member will
+// be a zero-value Member.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to worry about distinguishing a missing list-member
+		// from one stored as a zero-valued Member, because a zero-valued
+		// Member is invalid according to the W3C Baggage specification (it
+		// has an empty key).
+		return Member{}
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members is not significant.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if err := member.validate(); err != nil {
+		return b, fmt.Errorf("%w: %s", errInvalidMember, err)
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+	n := len(b.list)
+	if _, ok := b.list[key]; ok {
+		n--
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		if k == key {
+			continue
+		}
+		list[k] = v
+	}
+
+	return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+	return len(b.list)
+}
+
+// String encodes Baggage into a string compliant with the W3C Baggage
+// specification. The returned string will be invalid if the Baggage contains
+// any invalid list-members.
+func (b Baggage) String() string {
+	members := make([]string, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		}.String())
+	}
+	return strings.Join(members, listDelimiter)
+}
diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/baggage/context.go b/src/runtime/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 000000000..24b34b756
--- /dev/null
+++ b/src/runtime/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/unit/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/baggage/doc.go similarity index 65% rename from src/runtime/vendor/go.opentelemetry.io/otel/unit/doc.go rename to src/runtime/vendor/go.opentelemetry.io/otel/baggage/doc.go index 0d77a750c..4545100df 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/unit/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package unit provides units. -// -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -package unit // import "go.opentelemetry.io/otel/unit" +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/codes/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/codes/doc.go index 368f8818f..df3e0f1b6 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -15,11 +15,7 @@ /* Package codes defines the canonical error codes used by OpenTelemetry. -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track -the evolving OpenTelemetry specification and user feedback. - It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#statuscanonicalcode). +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). 
*/ package codes // import "go.opentelemetry.io/otel/codes" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/doc.go index 953ab998d..daa36c89d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/doc.go @@ -16,16 +16,14 @@ Package otel provides global access to the OpenTelemetry API. The subpackages of the otel package provide an implementation of the OpenTelemetry API. -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - The provided API is used to instrument code and measure data about that code's performance and operation. The measured data, by default, is not processed or transmitted anywhere. An implementation of the OpenTelemetry SDK, like the default SDK implementation (go.opentelemetry.io/otel/sdk), and associated exporters are used to process and transport this data. +To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. + To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/error_handler.go b/src/runtime/vendor/go.opentelemetry.io/otel/error_handler.go index ac42f8be0..72fad8541 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/error_handler.go @@ -16,7 +16,23 @@ package otel // import "go.opentelemetry.io/otel" // ErrorHandler handles irremediable events. type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + // Handle handles any error deemed irremediable by an OpenTelemetry // component. Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/LICENSE b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE similarity index 100% rename from src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/LICENSE rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md new file mode 100644 index 000000000..598c569a5 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md @@ -0,0 +1,50 @@ +# OpenTelemetry-Go Jaeger Exporter + +[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger) + +[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md) implementation. 
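+
+As a quick orientation, here is a minimal, illustrative sketch of wiring the
+exporter into an SDK tracer provider. It assumes the default collector
+endpoint (`http://localhost:14268/api/traces`) and elides production-grade
+error handling and resource configuration:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/jaeger"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Create the exporter. With no options, WithCollectorEndpoint falls back
+	// to OTEL_EXPORTER_JAEGER_ENDPOINT or the default collector URL.
+	exp, err := jaeger.New(jaeger.WithCollectorEndpoint())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Register the exporter behind a batching span processor and install the
+	// provider globally.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+	otel.SetTracerProvider(tp)
+
+	// Spans created through the global tracer are now exported to Jaeger.
+	_, span := otel.Tracer("readme-example").Start(context.Background(), "hello")
+	span.End()
+}
+```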
+
+## Installation
+
+```
+go get -u go.opentelemetry.io/otel/exporters/jaeger
+```
+
+## Example
+
+See [../../example/jaeger](../../example/jaeger), or the minimal sketch near the top of this README.
+
+## Configuration
+
+The exporter can be used to send spans to:
+
+- Jaeger agent using `jaeger.thrift` over compact thrift protocol via
+  [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option.
+- Jaeger collector using `jaeger.thrift` over HTTP via
+  [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option.
+
+### Environment Variables
+
+The following environment variables can be used
+(instead of options objects) to override the default configuration.
+
+| Environment variable              | Option                                                                                        | Default value                       |
+| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- |
+| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost`                         |
+| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831`                              |
+| `OTEL_EXPORTER_JAEGER_ENDPOINT`   | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint)   | `http://localhost:14268/api/traces` |
+| `OTEL_EXPORTER_JAEGER_USER`       | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername)   |                                     |
+| `OTEL_EXPORTER_JAEGER_PASSWORD`   | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword)   |                                     |
+
+Configuration using options has precedence over the environment variables.
+
+## Contributing
+
+This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path.
+When re-generating Thrift code in the future, please adapt import paths as necessary.
+
+## References
+
+- [Jaeger](https://www.jaegertracing.io/)
+- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md)
+- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md)
diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
new file mode 100644
index 000000000..80cc1d2f6
--- /dev/null
+++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
@@ -0,0 +1,203 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "context" + "fmt" + "io" + "log" + "net" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" + + genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" +) + +// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent +const udpPacketMaxLength = 65000 + +// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. +type agentClientUDP struct { + genAgent.Agent + io.Closer + + connUDP udpConn + client *genAgent.AgentClient + maxPacketSize int // max size of datagram in bytes + thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span + thriftProtocol thrift.TProtocol +} + +type udpConn interface { + Write([]byte) (int, error) + SetWriteBuffer(int) error + Close() error +} + +type agentClientUDPParams struct { + Host string + Port string + MaxPacketSize int + Logger *log.Logger + AttemptReconnecting bool + AttemptReconnectInterval time.Duration +} + +// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. +func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) { + hostPort := net.JoinHostPort(params.Host, params.Port) + // validate hostport + if _, _, err := net.SplitHostPort(hostPort); err != nil { + return nil, err + } + + if params.MaxPacketSize <= 0 { + params.MaxPacketSize = udpPacketMaxLength + } + + if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 { + params.AttemptReconnectInterval = time.Second * 30 + } + + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) + protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{}) + thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) + client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory) + + var connUDP udpConn + var err error + + if params.AttemptReconnecting { + // host is hostname, setup resolver loop in case host record changes during operation + connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) + if err != nil { + return nil, err + } + } else { + destAddr, err := net.ResolveUDPAddr("udp", hostPort) + if err != nil { + return nil, err + } + + connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + } + + if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { + return nil, err + } + + return &agentClientUDP{ + connUDP: connUDP, + client: client, + maxPacketSize: params.MaxPacketSize, + thriftBuffer: thriftBuffer, + thriftProtocol: thriftProtocol, + }, nil +} + +// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent. +func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error { + var errs []error + processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process) + if err != nil { + // drop the batch if serialization of process fails. 
+		return err
+	}
+	totalSize := processSize
+	var spans []*gen.Span
+	for _, span := range batch.Spans {
+		spanSize, err := a.calcSizeOfSerializedThrift(ctx, span)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span))
+			continue
+		}
+		if spanSize+processSize >= a.maxPacketSize {
+			// drop the span that exceeds the limit.
+			errs = append(errs, fmt.Errorf("span too large to send: %v", span))
+			continue
+		}
+		if totalSize+spanSize >= a.maxPacketSize {
+			if err := a.flush(ctx, &gen.Batch{
+				Process: batch.Process,
+				Spans:   spans,
+			}); err != nil {
+				errs = append(errs, err)
+			}
+			spans = spans[:0]
+			totalSize = processSize
+		}
+		totalSize += spanSize
+		spans = append(spans, span)
+	}
+
+	if len(spans) > 0 {
+		if err := a.flush(ctx, &gen.Batch{
+			Process: batch.Process,
+			Spans:   spans,
+		}); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) == 1 {
+		return errs[0]
+	} else if len(errs) > 1 {
+		joined := a.makeJoinedErrorString(errs)
+		return fmt.Errorf("multiple errors during transform: %s", joined)
+	}
+	return nil
+}
+
+// makeJoinedErrorString joins all the errors into one error message.
+func (a *agentClientUDP) makeJoinedErrorString(errs []error) string {
+	var errMsgs []string
+	for _, err := range errs {
+		errMsgs = append(errMsgs, err.Error())
+	}
+	return strings.Join(errMsgs, ", ")
+}
+
+// flush sends the batch of spans to the agent.
+func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error {
+	a.thriftBuffer.Reset()
+	if err := a.client.EmitBatch(ctx, batch); err != nil {
+		return err
+	}
+	if a.thriftBuffer.Len() > a.maxPacketSize {
+		return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
+			a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+	}
+	_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+	return err
+}
+
+// calcSizeOfSerializedThrift calculates the serialized thrift packet size.
+func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) {
+	a.thriftBuffer.Reset()
+	err := thriftStruct.Write(ctx, a.thriftProtocol)
+	return a.thriftBuffer.Len(), err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *agentClientUDP) Close() error {
+	return a.connUDP.Close()
+}
diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
similarity index 69%
rename from src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/doc.go
rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
index 57bb0dac8..0d7ba8676 100644
--- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/doc.go
+++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
@@ -13,8 +13,4 @@
 // limitations under the License.
 
 // Package jaeger contains an OpenTelemetry tracing exporter for Jaeger.
-//
-// This package is currently in a pre-GA phase. Backwards incompatible changes
-// may be introduced in subsequent minor version releases as we work to track
-// the evolving OpenTelemetry specification and user feedback.
-package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go new file mode 100644 index 000000000..945189806 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "os" +) + +// Environment variable names +const ( + // Hostname for the Jaeger agent, part of address where exporter sends spans + // i.e. "localhost" + envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST" + // Port for the Jaeger agent, part of address where exporter sends spans + // i.e. 6831 + envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT" + // The HTTP endpoint for sending spans directly to a collector, + // i.e. http://jaeger-collector:14268/api/traces. + envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT" + // Username to send as part of "Basic" authentication to the collector endpoint. + envUser = "OTEL_EXPORTER_JAEGER_USER" + // Password to send as part of "Basic" authentication to the collector endpoint. 
+	envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD"
+)
+
+// envOr returns an env variable's value if it exists, or the default if not.
+func envOr(key, defaultValue string) string {
+	if v, ok := os.LookupEnv(key); ok && v != "" {
+		return v
+	}
+	return defaultValue
+}
diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/go.mod b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/go.mod
new file mode 100644
index 000000000..42d337d54
--- /dev/null
+++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/go.mod
@@ -0,0 +1,75 @@
+module go.opentelemetry.io/otel/exporters/jaeger
+
+go 1.15
+
+require (
+	github.com/google/go-cmp v0.5.6
+	github.com/stretchr/testify v1.7.0
+	go.opentelemetry.io/otel v1.0.0
+	go.opentelemetry.io/otel/sdk v1.0.0
+	go.opentelemetry.io/otel/trace v1.0.0
+)
+
+replace go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus
+
+replace go.opentelemetry.io/otel/bridge/opentracing => ../../bridge/opentracing
+
+replace go.opentelemetry.io/otel/example/jaeger => ../../example/jaeger
+
+replace go.opentelemetry.io/otel/example/namedtracer => ../../example/namedtracer
+
+replace go.opentelemetry.io/otel/example/opencensus => ../../example/opencensus
+
+replace go.opentelemetry.io/otel/example/otel-collector => ../../example/otel-collector
+
+replace go.opentelemetry.io/otel/example/prom-collector => ../../example/prom-collector
+
+replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus
+
+replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin
+
+replace go.opentelemetry.io/otel/exporters/prometheus => ../prometheus
+
+replace go.opentelemetry.io/otel/exporters/otlp => ../otlp
+
+replace go.opentelemetry.io/otel/exporters/jaeger => ./
+
+replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools
+
+replace go.opentelemetry.io/otel/metric => ../../metric
+
+replace go.opentelemetry.io/otel/sdk/export/metric => ../../sdk/export/metric
+
+replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
+
+replace go.opentelemetry.io/otel/trace => ../../trace
+
+replace go.opentelemetry.io/otel/example/passthrough => ../../example/passthrough
+
+replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../otlp/otlptrace
+
+replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlp/otlptrace/otlptracegrpc
+
+replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlp/otlptrace/otlptracehttp
+
+replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
+
+replace go.opentelemetry.io/otel => ../..
+ +replace go.opentelemetry.io/otel/exporters/zipkin => ../zipkin + +replace go.opentelemetry.io/otel/sdk => ../../sdk + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc + +replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../stdout/stdoutmetric + +replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../stdout/stdouttrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../otlp/otlpmetric/otlpmetrichttp + +replace go.opentelemetry.io/otel/bridge/opencensus/test => ../../bridge/opencensus/test + +replace go.opentelemetry.io/otel/example/fib => ../../example/fib diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/go.sum b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/go.sum new file mode 100644 index 000000000..f0e2991c1 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/go.sum @@ -0,0 +1,18 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go new file mode 100644 index 000000000..54cd3b086 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
+ +package agent + +var GoUnusedProtection__ int; + diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go new file mode 100644 index 000000000..3b96e3222 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go @@ -0,0 +1,27 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +func init() { +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go new file mode 100644 index 000000000..c7c8e9ca3 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go @@ -0,0 +1,412 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +type Agent interface { + // Parameters: + // - Spans + EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) + // Parameters: + // - Batch + EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) +} + +type AgentClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { + return &AgentClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { + return &AgentClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAgentClient(c thrift.TClient) *AgentClient { + return &AgentClient{ + c: c, + } +} + +func (p *AgentClient) Client_() thrift.TClient { + return p.c +} + +func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { + var _args0 AgentEmitZipkinBatchArgs + _args0.Spans = spans + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { + return err + } + return nil +} + +// Parameters: +// - Batch +func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) { + var _args1 AgentEmitBatchArgs + _args1.Batch = batch + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { + return err + } + return nil +} + +type AgentProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Agent +} + +func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAgentProcessor(handler Agent) *AgentProcessor { + + self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} + self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} + return self2 +} + +func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x3.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x3 + +} + +type agentProcessorEmitZipkinBatch struct { + handler Agent +} + +func (p 
*agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil +} + +type agentProcessorEmitBatch struct { + handler Agent +} + +func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type AgentEmitZipkinBatchArgs struct { + Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { + return &AgentEmitZipkinBatchArgs{} +} + +func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { + return p.Spans +} +func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*zipkincore.Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem4 := &zipkincore.Span{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Spans = append(p.Spans, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + 
return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *AgentEmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Batch +type AgentEmitBatchArgs struct { + Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` +} + +func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { + return &AgentEmitBatchArgs{} +} + +var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch + +func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } + return p.Batch +} +func (p *AgentEmitBatchArgs) IsSetBatch() bool { + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Batch = &jaeger.Batch{} + if err := p.Batch.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + 
return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) + } + if err := p.Batch.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) + } + return err +} + +func (p *AgentEmitBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go new file mode 100644 index 000000000..fe45a9f9a --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +var GoUnusedProtection__ int; + diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go similarity index 54% rename from src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/jaeger-consts.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go index d2b0fa9a9..10162857f 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/jaeger-consts.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go @@ -1,5 +1,4 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. package jaeger @@ -7,16 +6,16 @@ import ( "bytes" "context" "fmt" - "reflect" + "time" - "github.com/apache/thrift/lib/go/thrift" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) var _ = thrift.ZERO var _ = fmt.Printf var _ = context.Background -var _ = reflect.DeepEqual +var _ = time.Now var _ = bytes.Equal func init() { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/jaeger.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go similarity index 50% rename from src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/jaeger.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go index 3dc6f6cbf..b1fe26c57 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/jaeger.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go @@ -1,7 +1,6 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +// Code generated by Thrift Compiler (0.14.1). 
DO NOT EDIT. -package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger" +package jaeger import ( "bytes" @@ -9,16 +8,16 @@ import ( "database/sql/driver" "errors" "fmt" - "reflect" + "time" - "github.com/apache/thrift/lib/go/thrift" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) var _ = thrift.ZERO var _ = fmt.Printf var _ = context.Background -var _ = reflect.DeepEqual +var _ = time.Now var _ = bytes.Equal type TagType int64 @@ -243,8 +242,8 @@ func (p *Tag) IsSetVBinary() bool { return p.VBinary != nil } -func (p *Tag) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } @@ -252,7 +251,7 @@ func (p *Tag) Read(iprot thrift.TProtocol) error { var issetVType bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -262,86 +261,86 @@ func (p *Tag) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetKey = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetKey = true case 2: if fieldTypeId == thrift.I32 { - if err := p.ReadField2(iprot); err != nil { + if err := p.ReadField2(ctx, iprot); err != nil { return err } + issetVType = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetVType = true case 3: if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { + if err := p.ReadField3(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } case 4: if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(iprot); err != nil { + if err := p.ReadField4(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } case 5: if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(iprot); err != nil { + if err := p.ReadField5(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } case 6: if fieldTypeId == thrift.I64 { - if err := p.ReadField6(iprot); err != nil { + if err := p.ReadField6(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } case 7: if fieldTypeId == thrift.STRING { - if err := p.ReadField7(iprot); err != nil { + if err := p.ReadField7(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if 
err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } if !issetKey { @@ -353,8 +352,8 @@ func (p *Tag) Read(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { return thrift.PrependError("error reading field 1: ", err) } else { p.Key = v @@ -362,8 +361,8 @@ func (p *Tag) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { return thrift.PrependError("error reading field 2: ", err) } else { temp := TagType(v) @@ -372,8 +371,8 @@ func (p *Tag) ReadField2(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { return thrift.PrependError("error reading field 3: ", err) } else { p.VStr = &v @@ -381,8 +380,8 @@ func (p *Tag) ReadField3(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { +func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { return thrift.PrependError("error reading field 4: ", err) } else { p.VDouble = &v @@ -390,8 +389,8 @@ func (p *Tag) ReadField4(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { return thrift.PrependError("error reading field 5: ", err) } else { p.VBool = &v @@ -399,8 +398,8 @@ func (p *Tag) ReadField5(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 6: ", err) } else { p.VLong = &v @@ -408,8 +407,8 @@ func (p *Tag) ReadField6(iprot thrift.TProtocol) error { return nil } -func (p *Tag) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { +func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { return thrift.PrependError("error reading field 7: ", err) } else { p.VBinary = v @@ -417,143 +416,193 @@ func (p *Tag) ReadField7(iprot thrift.TProtocol) error { return nil } -func (p *Tag) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Tag"); err != nil { +func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } 
if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { + if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { + if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { + if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { + if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(oprot); err != nil { + if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(oprot); err != nil { + if err := p.writeField7(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { +func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } - if err := oprot.WriteString(string(p.Key)); err != nil { + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } return err } -func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil { +func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) } - if err := oprot.WriteI32(int32(p.VType)); err != nil { + if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) } return err } -func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) { +func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetVStr() { - if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil { + if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) } - if err := oprot.WriteString(string(*p.VStr)); err != nil { + if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) } } return err } 
-func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) { +func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil { + if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) } - if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil { + if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) } } return err } -func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) { +func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetVBool() { - if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil { + if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) } - if err := oprot.WriteBool(bool(*p.VBool)); err != nil { + if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) } } return err } -func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) { +func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetVLong() { - if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil { + if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) } - if err := oprot.WriteI64(int64(*p.VLong)); err != nil { + if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) } } return err } -func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) { +func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil { + if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) } - if err := oprot.WriteBinary(p.VBinary); err != nil { + if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) } } return err } +func (p *Tag) Equals(other *Tag) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if p.VType != other.VType { + return false + } + if 
p.VStr != other.VStr { + if p.VStr == nil || other.VStr == nil { + return false + } + if (*p.VStr) != (*other.VStr) { + return false + } + } + if p.VDouble != other.VDouble { + if p.VDouble == nil || other.VDouble == nil { + return false + } + if (*p.VDouble) != (*other.VDouble) { + return false + } + } + if p.VBool != other.VBool { + if p.VBool == nil || other.VBool == nil { + return false + } + if (*p.VBool) != (*other.VBool) { + return false + } + } + if p.VLong != other.VLong { + if p.VLong == nil || other.VLong == nil { + return false + } + if (*p.VLong) != (*other.VLong) { + return false + } + } + if bytes.Compare(p.VBinary, other.VBinary) != 0 { + return false + } + return true +} + func (p *Tag) String() string { if p == nil { return "" @@ -580,8 +629,8 @@ func (p *Log) GetTimestamp() int64 { func (p *Log) GetFields() []*Tag { return p.Fields } -func (p *Log) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } @@ -589,7 +638,7 @@ func (p *Log) Read(iprot thrift.TProtocol) error { var issetFields bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -599,36 +648,36 @@ func (p *Log) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetTimestamp = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetTimestamp = true case 2: if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { + if err := p.ReadField2(ctx, iprot); err != nil { return err } + issetFields = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetFields = true default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } if !issetTimestamp { @@ -640,8 +689,8 @@ func (p *Log) Read(iprot thrift.TProtocol) error { return nil } -func (p *Log) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 1: ", err) } else { p.Timestamp = v @@ -649,8 +698,8 @@ func (p *Log) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *Log) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } @@ -658,72 +707,93 @@ func (p *Log) ReadField2(iprot thrift.TProtocol) error { p.Fields = 
tSlice for i := 0; i < size; i++ { _elem0 := &Tag{} - if err := _elem0.Read(iprot); err != nil { + if err := _elem0.Read(ctx, iprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) } p.Fields = append(p.Fields, _elem0) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *Log) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Log"); err != nil { +func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Log"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { + if err := p.writeField2(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *Log) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { +func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) } - if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) } return err } -func (p *Log) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil { +func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Fields { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) } return err } +func (p *Log) Equals(other *Log) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if len(p.Fields) != 
len(other.Fields) { + return false + } + for i, _tgt := range p.Fields { + _src1 := other.Fields[i] + if !_tgt.Equals(_src1) { + return false + } + } + return true +} + func (p *Log) String() string { if p == nil { return "" @@ -762,8 +832,8 @@ func (p *SpanRef) GetTraceIdHigh() int64 { func (p *SpanRef) GetSpanId() int64 { return p.SpanId } -func (p *SpanRef) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } @@ -773,7 +843,7 @@ func (p *SpanRef) Read(iprot thrift.TProtocol) error { var issetSpanId bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -783,58 +853,58 @@ func (p *SpanRef) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetRefType = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetRefType = true case 2: if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { + if err := p.ReadField2(ctx, iprot); err != nil { return err } + issetTraceIdLow = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetTraceIdLow = true case 3: if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { + if err := p.ReadField3(ctx, iprot); err != nil { return err } + issetTraceIdHigh = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetTraceIdHigh = true case 4: if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { + if err := p.ReadField4(ctx, iprot); err != nil { return err } + issetSpanId = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetSpanId = true default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } if !issetRefType { @@ -852,8 +922,8 @@ func (p *SpanRef) Read(iprot thrift.TProtocol) error { return nil } -func (p *SpanRef) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { return thrift.PrependError("error reading field 1: ", err) } else { temp := SpanRefType(v) @@ -862,8 +932,8 @@ func (p *SpanRef) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *SpanRef) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); 
err != nil { return thrift.PrependError("error reading field 2: ", err) } else { p.TraceIdLow = v @@ -871,8 +941,8 @@ func (p *SpanRef) ReadField2(iprot thrift.TProtocol) error { return nil } -func (p *SpanRef) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 3: ", err) } else { p.TraceIdHigh = v @@ -880,8 +950,8 @@ func (p *SpanRef) ReadField3(iprot thrift.TProtocol) error { return nil } -func (p *SpanRef) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 4: ", err) } else { p.SpanId = v @@ -889,85 +959,106 @@ func (p *SpanRef) ReadField4(iprot thrift.TProtocol) error { return nil } -func (p *SpanRef) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SpanRef"); err != nil { +func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { + if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { + if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { + if err := p.writeField4(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil { +func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) } - if err := oprot.WriteI32(int32(p.RefType)); err != nil { + if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) } return err } -func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil { +func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { return 
thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) } return err } -func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil { +func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) } return err } -func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil { +func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) } return err } +func (p *SpanRef) Equals(other *SpanRef) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.RefType != other.RefType { + return false + } + if p.TraceIdLow != other.TraceIdLow { + return false + } + if p.TraceIdHigh != other.TraceIdHigh { + return false + } + if p.SpanId != other.SpanId { + return false + } + return true +} + func (p *SpanRef) String() string { if p == nil { return "" @@ -1066,8 +1157,8 @@ func (p *Span) IsSetLogs() bool { return p.Logs != nil } -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } @@ -1081,7 +1172,7 @@ func (p *Span) Read(iprot thrift.TProtocol) error { var issetDuration bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -1091,132 +1182,132 @@ func (p *Span) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetTraceIdLow = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetTraceIdLow = true case 2: if fieldTypeId == 
thrift.I64 { - if err := p.ReadField2(iprot); err != nil { + if err := p.ReadField2(ctx, iprot); err != nil { return err } + issetTraceIdHigh = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetTraceIdHigh = true case 3: if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { + if err := p.ReadField3(ctx, iprot); err != nil { return err } + issetSpanId = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetSpanId = true case 4: if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { + if err := p.ReadField4(ctx, iprot); err != nil { return err } + issetParentSpanId = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetParentSpanId = true case 5: if fieldTypeId == thrift.STRING { - if err := p.ReadField5(iprot); err != nil { + if err := p.ReadField5(ctx, iprot); err != nil { return err } + issetOperationName = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetOperationName = true case 6: if fieldTypeId == thrift.LIST { - if err := p.ReadField6(iprot); err != nil { + if err := p.ReadField6(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } case 7: if fieldTypeId == thrift.I32 { - if err := p.ReadField7(iprot); err != nil { + if err := p.ReadField7(ctx, iprot); err != nil { return err } + issetFlags = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetFlags = true case 8: if fieldTypeId == thrift.I64 { - if err := p.ReadField8(iprot); err != nil { + if err := p.ReadField8(ctx, iprot); err != nil { return err } + issetStartTime = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetStartTime = true case 9: if fieldTypeId == thrift.I64 { - if err := p.ReadField9(iprot); err != nil { + if err := p.ReadField9(ctx, iprot); err != nil { return err } + issetDuration = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetDuration = true case 10: if fieldTypeId == thrift.LIST { - if err := p.ReadField10(iprot); err != nil { + if err := p.ReadField10(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } case 11: if fieldTypeId == thrift.LIST { - if err := p.ReadField11(iprot); err != nil { + if err := p.ReadField11(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } if !issetTraceIdLow { @@ 
-1246,8 +1337,8 @@ func (p *Span) Read(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 1: ", err) } else { p.TraceIdLow = v @@ -1255,8 +1346,8 @@ func (p *Span) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 2: ", err) } else { p.TraceIdHigh = v @@ -1264,8 +1355,8 @@ func (p *Span) ReadField2(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 3: ", err) } else { p.SpanId = v @@ -1273,8 +1364,8 @@ func (p *Span) ReadField3(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 4: ", err) } else { p.ParentSpanId = v @@ -1282,8 +1373,8 @@ func (p *Span) ReadField4(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { return thrift.PrependError("error reading field 5: ", err) } else { p.OperationName = v @@ -1291,28 +1382,28 @@ func (p *Span) ReadField5(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*SpanRef, 0, size) p.References = tSlice for i := 0; i < size; i++ { - _elem1 := &SpanRef{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + _elem2 := &SpanRef{} + if err := _elem2.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) } - p.References = append(p.References, _elem1) + p.References = append(p.References, _elem2) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *Span) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { +func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { return thrift.PrependError("error reading field 7: ", err) } else { p.Flags = v @@ -1320,8 +1411,8 @@ func (p *Span) ReadField7(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField8(iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 8: ", err) } else { p.StartTime = v @@ -1329,8 +1420,8 @@ func (p *Span) ReadField8(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { return thrift.PrependError("error reading field 9: ", err) } else { p.Duration = v @@ -1338,267 +1429,327 @@ func (p *Span) ReadField9(iprot thrift.TProtocol) error { return nil } -func (p *Span) ReadField10(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*Tag, 0, size) p.Tags = tSlice for i := 0; i < size; i++ { - _elem2 := &Tag{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + _elem3 := &Tag{} + if err := _elem3.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) } - p.Tags = append(p.Tags, _elem2) + p.Tags = append(p.Tags, _elem3) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *Span) ReadField11(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*Log, 0, size) p.Logs = tSlice for i := 0; i < size; i++ { - _elem3 := &Log{} - if err := _elem3.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + _elem4 := &Log{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) } - p.Logs = append(p.Logs, _elem3) + p.Logs = append(p.Logs, _elem4) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { + if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(oprot); err != nil { + if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(oprot); err != nil { + if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(oprot); err != nil { + if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := 
p.writeField6(oprot); err != nil { + if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(oprot); err != nil { + if err := p.writeField7(ctx, oprot); err != nil { return err } - if err := p.writeField8(oprot); err != nil { + if err := p.writeField8(ctx, oprot); err != nil { return err } - if err := p.writeField9(oprot); err != nil { + if err := p.writeField9(ctx, oprot); err != nil { return err } - if err := p.writeField10(oprot); err != nil { + if err := p.writeField10(ctx, oprot); err != nil { return err } - if err := p.writeField11(oprot); err != nil { + if err := p.writeField11(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil { +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) } return err } -func (p *Span) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil { +func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) } return err } -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil { +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) } return err } -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if 
err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil { +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) } - if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) } return err } -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil { +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) } - if err := oprot.WriteString(string(p.OperationName)); err != nil { + if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) } return err } -func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetReferences() { - if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil { + if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.References { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) } } return err } -func (p *Span) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil { +func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) } - if err := oprot.WriteI32(int32(p.Flags)); err != nil { + if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := 
oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) } return err } -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil { +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) } - if err := oprot.WriteI64(int64(p.StartTime)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) } return err } -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil { +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) } - if err := oprot.WriteI64(int64(p.Duration)); err != nil { + if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) } return err } -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) } } return err } -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetLogs() { - if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil { + if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { return 
thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Logs { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) } } return err } +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceIdLow != other.TraceIdLow { + return false + } + if p.TraceIdHigh != other.TraceIdHigh { + return false + } + if p.SpanId != other.SpanId { + return false + } + if p.ParentSpanId != other.ParentSpanId { + return false + } + if p.OperationName != other.OperationName { + return false + } + if len(p.References) != len(other.References) { + return false + } + for i, _tgt := range p.References { + _src5 := other.References[i] + if !_tgt.Equals(_src5) { + return false + } + } + if p.Flags != other.Flags { + return false + } + if p.StartTime != other.StartTime { + return false + } + if p.Duration != other.Duration { + return false + } + if len(p.Tags) != len(other.Tags) { + return false + } + for i, _tgt := range p.Tags { + _src6 := other.Tags[i] + if !_tgt.Equals(_src6) { + return false + } + } + if len(p.Logs) != len(other.Logs) { + return false + } + for i, _tgt := range p.Logs { + _src7 := other.Logs[i] + if !_tgt.Equals(_src7) { + return false + } + } + return true +} + func (p *Span) String() string { if p == nil { return "" @@ -1631,15 +1782,15 @@ func (p *Process) IsSetTags() bool { return p.Tags != nil } -func (p *Process) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } var issetServiceName bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -1649,35 +1800,35 @@ func (p *Process) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetServiceName = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetServiceName = true case 2: if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { + if err := p.ReadField2(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read 
struct end error: ", p), err) } if !issetServiceName { @@ -1686,8 +1837,8 @@ func (p *Process) Read(iprot thrift.TProtocol) error { return nil } -func (p *Process) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { +func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { return thrift.PrependError("error reading field 1: ", err) } else { p.ServiceName = v @@ -1695,83 +1846,104 @@ func (p *Process) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *Process) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*Tag, 0, size) p.Tags = tSlice for i := 0; i < size; i++ { - _elem4 := &Tag{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + _elem8 := &Tag{} + if err := _elem8.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) } - p.Tags = append(p.Tags, _elem4) + p.Tags = append(p.Tags, _elem8) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *Process) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Process"); err != nil { +func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { + if err := p.writeField2(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *Process) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { +func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } return err } -func (p *Process) writeField2(oprot thrift.TProtocol) (err error) { +func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil { + if err := 
oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) } } return err } +func (p *Process) Equals(other *Process) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if len(p.Tags) != len(other.Tags) { + return false + } + for i, _tgt := range p.Tags { + _src9 := other.Tags[i] + if !_tgt.Equals(_src9) { + return false + } + } + return true +} + func (p *Process) String() string { if p == nil { return "" @@ -1779,12 +1951,231 @@ func (p *Process) String() string { return fmt.Sprintf("Process(%+v)", *p) } +// Attributes: +// - FullQueueDroppedSpans +// - TooLargeDroppedSpans +// - FailedToEmitSpans +type ClientStats struct { + FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` + TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` + FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` +} + +func NewClientStats() *ClientStats { + return &ClientStats{} +} + +func (p *ClientStats) GetFullQueueDroppedSpans() int64 { + return p.FullQueueDroppedSpans +} + +func (p *ClientStats) GetTooLargeDroppedSpans() int64 { + return p.TooLargeDroppedSpans +} + +func (p *ClientStats) GetFailedToEmitSpans() int64 { + return p.FailedToEmitSpans +} +func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetFullQueueDroppedSpans bool = false + var issetTooLargeDroppedSpans bool = false + var issetFailedToEmitSpans bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetFullQueueDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTooLargeDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetFailedToEmitSpans = true + } else { + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetFullQueueDroppedSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set")) + } + if !issetTooLargeDroppedSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")) + } + if !issetFailedToEmitSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")) + } + return nil +} + +func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.FullQueueDroppedSpans = v + } + return nil +} + +func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TooLargeDroppedSpans = v + } + return nil +} + +func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.FailedToEmitSpans = v + } + return nil +} + +func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) + } + return err +} + +func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) + } + return err +} + +func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) + } + return err +} + +func (p *ClientStats) Equals(other *ClientStats) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { + return false + } + if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { + return false + } + if p.FailedToEmitSpans != other.FailedToEmitSpans { + return false + } + return true +} + +func (p *ClientStats) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClientStats(%+v)", *p) +} + // Attributes: // - Process // - Spans +// - SeqNo +// - Stats type Batch struct { - Process *Process `thrift:"process,1,required" db:"process" json:"process"` - Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` + Process *Process `thrift:"process,1,required" db:"process" json:"process"` + Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` + SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` + Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` } func NewBatch() *Batch { @@ -1803,12 +2194,38 @@ func (p *Batch) GetProcess() *Process { func (p *Batch) GetSpans() []*Span { return p.Spans } + +var Batch_SeqNo_DEFAULT int64 + +func (p *Batch) GetSeqNo() int64 { + if !p.IsSetSeqNo() { + return Batch_SeqNo_DEFAULT + } + return *p.SeqNo +} + +var Batch_Stats_DEFAULT *ClientStats + +func (p *Batch) GetStats() *ClientStats { + if !p.IsSetStats() { + return Batch_Stats_DEFAULT + } + return p.Stats +} func (p *Batch) IsSetProcess() bool { return p.Process != nil } -func (p *Batch) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *Batch) IsSetSeqNo() bool { + return p.SeqNo != nil +} + +func (p *Batch) IsSetStats() bool { + return p.Stats != nil +} + +func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } @@ -1816,7 +2233,7 @@ func (p *Batch) Read(iprot thrift.TProtocol) error { var issetSpans bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -1826,36 +2243,56 @@ func (p *Batch) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetProcess = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetProcess = true case 2: if fieldTypeId == thrift.LIST { - if err := 
p.ReadField2(iprot); err != nil { + if err := p.ReadField2(ctx, iprot); err != nil { return err } + issetSpans = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetSpans = true default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } if !issetProcess { @@ -1867,89 +2304,174 @@ func (p *Batch) Read(iprot thrift.TProtocol) error { return nil } -func (p *Batch) ReadField1(iprot thrift.TProtocol) error { +func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { p.Process = &Process{} - if err := p.Process.Read(iprot); err != nil { + if err := p.Process.Read(ctx, iprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) } return nil } -func (p *Batch) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*Span, 0, size) p.Spans = tSlice for i := 0; i < size; i++ { - _elem5 := &Span{} - if err := _elem5.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + _elem10 := &Span{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) } - p.Spans = append(p.Spans, _elem5) + p.Spans = append(p.Spans, _elem10) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *Batch) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Batch"); err != nil { +func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SeqNo = &v + } + return nil +} + +func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Stats = &ClientStats{} + if err := p.Stats.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) + } + return nil +} + +func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := 
p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField2(oprot); err != nil { + if err := p.writeField4(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil { +func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) } - if err := p.Process.Write(oprot); err != nil { + if err := p.Process.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) } return err } -func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil { +func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) } return err } +func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSeqNo() { + if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) + } + } + return err +} + +func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStats() { + if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) + } + if err := p.Stats.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) + } + } + return err +} + +func (p *Batch) Equals(other *Batch) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Process.Equals(other.Process) { + return false + } + if len(p.Spans) != len(other.Spans) { + return false + } + for i, _tgt := range p.Spans { + _src11 := other.Spans[i] + if !_tgt.Equals(_src11) { + return false + } + } + if p.SeqNo != other.SeqNo { + if p.SeqNo == nil || other.SeqNo == nil { + return false + } + if (*p.SeqNo) != (*other.SeqNo) { + return false + } + } + if !p.Stats.Equals(other.Stats) { + return false + } + return true +} + func (p *Batch) String() string { if p == nil { return "" @@ -1970,15 +2492,15 @@ func NewBatchSubmitResponse() *BatchSubmitResponse { func (p *BatchSubmitResponse) GetOk() bool { return p.Ok } -func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } var issetOk bool = false for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -1988,25 +2510,25 @@ func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } + issetOk = true } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - issetOk = true default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } if !issetOk { @@ -2015,8 +2537,8 @@ func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error { return nil } -func (p *BatchSubmitResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { +func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { return thrift.PrependError("error reading field 1: ", err) } else { p.Ok = v @@ -2024,37 +2546,49 @@ func (p *BatchSubmitResponse) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil { +func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop 
error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { +func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) } return err } +func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { + return false + } + return true +} + func (p *BatchSubmitResponse) String() string { if p == nil { return "" @@ -2065,21 +2599,20 @@ func (p *BatchSubmitResponse) String() string { type Collector interface { // Parameters: // - Batches - SubmitBatches(ctx context.Context, batches []*Batch) (r []*BatchSubmitResponse, err error) + SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) } type CollectorClient struct { - c thrift.TClient + c thrift.TClient + meta thrift.ResponseMeta } -// Deprecated: Use NewCollector instead func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { return &CollectorClient{ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), } } -// Deprecated: Use NewCollector instead func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { return &CollectorClient{ c: thrift.NewTStandardClient(iprot, oprot), @@ -2092,16 +2625,31 @@ func NewCollectorClient(c thrift.TClient) *CollectorClient { } } +func (p *CollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + // Parameters: // - Batches -func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (r []*BatchSubmitResponse, err error) { - var _args6 CollectorSubmitBatchesArgs - _args6.Batches = batches - var _result7 CollectorSubmitBatchesResult - if err = p.c.Call(ctx, "submitBatches", &_args6, &_result7); err != nil { +func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { + var _args12 CollectorSubmitBatchesArgs + _args12.Batches = batches + var _result14 CollectorSubmitBatchesResult + var _meta13 thrift.ResponseMeta + _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14) + p.SetLastResponseMeta_(_meta13) + if _err != nil { return } - return _result7.GetSuccess(), nil + return _result14.GetSuccess(), nil } type CollectorProcessor struct { @@ -2124,27 +2672,27 @@ func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction func NewCollectorProcessor(handler Collector) *CollectorProcessor { - 
self8 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self8.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} - return self8 + self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} + return self15 } func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) } if processor, ok := p.GetProcessorFunction(name); ok { return processor.Process(ctx, seqId, iprot, oprot) } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x9 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x9.Write(oprot) - oprot.WriteMessageEnd() + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x16.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) oprot.Flush(ctx) - return false, x9 + return false, x16 } @@ -2154,41 +2702,72 @@ type collectorProcessorSubmitBatches struct { func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { args := CollectorSubmitBatchesArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submitBatches", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) oprot.Flush(ctx) - return false, err + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) } - iprot.ReadMessageEnd() result := CollectorSubmitBatchesResult{} var retval []*BatchSubmitResponse - var err2 error if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error()) - oprot.WriteMessageBegin("submitBatches", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) oprot.Flush(ctx) - return true, err2 + return true, thrift.WrapTException(err2) } else { result.Success = retval } - if err2 = oprot.WriteMessageBegin("submitBatches", thrift.REPLY, seqId); err2 != nil { - err = err2 + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) } if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + err = thrift.WrapTException(err2) } if err != nil { return @@ -2211,13 +2790,13 @@ func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { return p.Batches } -func (p *CollectorSubmitBatchesArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -2227,83 +2806,83 @@ func (p *CollectorSubmitBatchesArgs) Read(iprot thrift.TProtocol) error { switch fieldId { case 1: if fieldTypeId == thrift.LIST { - if err := p.ReadField1(iprot); err != nil { + if err := p.ReadField1(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != 
nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *CollectorSubmitBatchesArgs) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*Batch, 0, size) p.Batches = tSlice for i := 0; i < size; i++ { - _elem10 := &Batch{} - if err := _elem10.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + _elem17 := &Batch{} + if err := _elem17.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) } - p.Batches = append(p.Batches, _elem10) + p.Batches = append(p.Batches, _elem17) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *CollectorSubmitBatchesArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitBatches_args"); err != nil { +func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField1(oprot); err != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *CollectorSubmitBatchesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batches", thrift.LIST, 1); err != nil { +func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Batches)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Batches { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) } return err @@ -2335,13 +2914,13 @@ func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { return p.Success != nil } -func (p *CollectorSubmitBatchesResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { +func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) if err != nil { return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } @@ -2351,84 +2930,84 @@ func (p *CollectorSubmitBatchesResult) Read(iprot thrift.TProtocol) error { switch fieldId { case 0: if fieldTypeId == thrift.LIST { - if err := p.ReadField0(iprot); err != nil { + if err := p.ReadField0(ctx, iprot); err != nil { return err } } else { - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } default: - if err := iprot.Skip(fieldTypeId); err != nil { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { return err } } - if err := iprot.ReadFieldEnd(); err != nil { + if err := iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *CollectorSubmitBatchesResult) ReadField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() +func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) if err != nil { return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*BatchSubmitResponse, 0, size) p.Success = tSlice for i := 0; i < size; i++ { - _elem11 := &BatchSubmitResponse{} - if err := _elem11.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) + _elem18 := &BatchSubmitResponse{} + if err := _elem18.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) } - p.Success = append(p.Success, _elem11) + p.Success = append(p.Success, _elem18) } - if err := iprot.ReadListEnd(); err != nil { + if err := iprot.ReadListEnd(ctx); err != nil { return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *CollectorSubmitBatchesResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitBatches_result"); err != nil { +func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if p != nil { - if err := p.writeField0(oprot); err != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } } - if err := oprot.WriteFieldStop(); err != nil { + if err := oprot.WriteFieldStop(ctx); err != nil { return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(); err != nil { + if err := oprot.WriteStructEnd(ctx); err != nil { return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *CollectorSubmitBatchesResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } 
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Success { - if err := v.Write(oprot); err != nil { + if err := v.Write(ctx, oprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } - if err := oprot.WriteListEnd(); err != nil { + if err := oprot.WriteListEnd(ctx); err != nil { return thrift.PrependError("error writing list end: ", err) } - if err := oprot.WriteFieldEnd(); err != nil { + if err := oprot.WriteFieldEnd(ctx); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go new file mode 100644 index 000000000..ebf43018f --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +var GoUnusedProtection__ int; + diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go new file mode 100644 index 000000000..043ecba96 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go @@ -0,0 +1,39 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const LOCAL_COMPONENT = "lc" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go new file mode 100644 index 000000000..7f46810e0 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go @@ -0,0 +1,2067 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// +// Conventionally, when the port isn't known, port = 0. +// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// - Ipv6: IPv6 host address packed into 16 bytes. 
Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + 
return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// An annotation is similar to a log statement. It includes a host field which +// allows these events to be attributed properly, and also aggregatable. +// +// Attributes: +// - Timestamp: Microseconds from epoch. 
+// +// This value should use the most precise value possible. For example, +// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. +// - Value +// - Host: Always the host that recorded the event. By specifying the host you allow +// rollup of all events (such as client requests to a service) by IP address. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + 
return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of "http.uri" could the path to a resource in a +// RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.uri" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key +// - Value +// - AnnotationType +// - Host: The host that recorded tag, which allows you to differentiate between +// multiple tags with the same key. There are two exceptions to this. 
+// +// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, or clients such as web browsers. +type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot 
thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if 
p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } + if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// The root span is where trace_id = id and parent_id = Nil. The root span is +// usually the longest interval in the trace, starting with a SERVER_RECV +// annotation and ending with a SERVER_SEND. +// +// Attributes: +// - TraceID +// - Name: Span name in lowercase, rpc method for example +// +// Conventionally, when the span name isn't known, name = "unknown". +// - ID +// - ParentID +// - Annotations +// - BinaryAnnotations +// - Debug +// - Timestamp: Microseconds from epoch of the creation of this span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// This field is optional for compatibility with old data: first-party span +// stores are expected to support this at time of introduction. +// - Duration: Measurement of duration in microseconds, used to support queries. +// +// This value should be set directly, where possible. Doing so encourages +// precise measurement decoupled from problems of clocks, such as skew or NTP +// updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. 
+type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, 
iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { + return false + } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - Ok +type Response struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewResponse() *Response { + return &Response{} +} + +func (p *Response) GetOk() bool { + return p.Ok +} +func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *Response) Equals(other *Response) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { + return false + } + return true +} + +func (p *Response) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Response(%+v)", *p) +} + +type ZipkinCollector interface { + // Parameters: + // - Spans + SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) +} + +type ZipkinCollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func 
NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: c, + } +} + +func (p *ZipkinCollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) { + var _args4 ZipkinCollectorSubmitZipkinBatchArgs + _args4.Spans = spans + var _result6 ZipkinCollectorSubmitZipkinBatchResult + var _meta5 thrift.ResponseMeta + _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) + p.SetLastResponseMeta_(_meta5) + if _err != nil { + return + } + return _result6.GetSuccess(), nil +} + +type ZipkinCollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ZipkinCollector +} + +func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { + + self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} + return self7 +} + +func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x8.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x8 + +} + +type zipkinCollectorProcessorSubmitZipkinBatch struct { + handler ZipkinCollector +} + +func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ZipkinCollectorSubmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := ZipkinCollectorSubmitZipkinBatchResult{} + var retval []*Response + if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type ZipkinCollectorSubmitZipkinBatchArgs struct { + Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { + return &ZipkinCollectorSubmitZipkinBatchArgs{} +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { + return p.Spans +} +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem9 := &Span{} + if err := _elem9.Read(ctx, iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.Spans = append(p.Spans, _elem9) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ZipkinCollectorSubmitZipkinBatchResult struct { + Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { + return &ZipkinCollectorSubmitZipkinBatchResult{} +} + +var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response + +func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { + return p.Success +} +func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p 
*ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Response, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem10 := &Response{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Success = append(p.Success, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE new file mode 100644 index 000000000..2bc6fbbf6 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE @@ -0,0 +1,306 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. + +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- diff --git a/src/runtime/vendor/github.com/apache/thrift/NOTICE b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/NOTICE rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go similarity index 71% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go index 0023c57cf..32d5b0147 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go @@ -19,6 +19,10 @@ package thrift +import ( + "context" +) + const ( UNKNOWN_APPLICATION_EXCEPTION = 0 UNKNOWN_METHOD = 1 @@ -51,8 +55,8 @@ var defaultApplicationExceptionMessage = map[int32]string{ type TApplicationException interface { TException TypeId() int32 - Read(iprot TProtocol) error - Write(oprot TProtocol) error + Read(ctx context.Context, iprot TProtocol) error + Write(ctx context.Context, oprot TProtocol) error } type tApplicationException struct { @@ -60,6 +64,12 @@ type tApplicationException struct { type_ int32 } +var _ TApplicationException = (*tApplicationException)(nil) + +func (tApplicationException) TExceptionType() TExceptionType { + return TExceptionTypeApplication +} + func (e tApplicationException) Error() string { if e.message != "" { return e.message @@ -75,9 +85,9 @@ func (p *tApplicationException) TypeId() int32 { return p.type_ } -func (p *tApplicationException) Read(iprot TProtocol) error { +func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { // TODO: this should really be generated by the compiler - _, err := iprot.ReadStructBegin() + _, err := iprot.ReadStructBegin(ctx) if err != nil { return err } @@ -86,7 +96,7 @@ func (p *tApplicationException) Read(iprot TProtocol) error { type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) for { - _, ttype, id, err := iprot.ReadFieldBegin() + _, ttype, id, err := iprot.ReadFieldBegin(ctx) if err != nil { return err } @@ -96,34 +106,34 @@ func (p *tApplicationException) Read(iprot TProtocol) error { switch id { case 1: if ttype == STRING { - if message, err = iprot.ReadString(); err != nil { + if message, err = iprot.ReadString(ctx); err != nil { return err } } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { return err } } case 2: if ttype == I32 
{ - if type_, err = iprot.ReadI32(); err != nil { + if type_, err = iprot.ReadI32(ctx); err != nil { return err } } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { return err } } default: - if err = SkipDefaultDepth(iprot, ttype); err != nil { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { return err } } - if err = iprot.ReadFieldEnd(); err != nil { + if err = iprot.ReadFieldEnd(ctx); err != nil { return err } } - if err := iprot.ReadStructEnd(); err != nil { + if err := iprot.ReadStructEnd(ctx); err != nil { return err } @@ -133,38 +143,38 @@ func (p *tApplicationException) Read(iprot TProtocol) error { return nil } -func (p *tApplicationException) Write(oprot TProtocol) (err error) { - err = oprot.WriteStructBegin("TApplicationException") +func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { + err = oprot.WriteStructBegin(ctx, "TApplicationException") if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin("message", STRING, 1) + err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) if err != nil { return } - err = oprot.WriteString(p.Error()) + err = oprot.WriteString(ctx, p.Error()) if err != nil { return } - err = oprot.WriteFieldEnd() + err = oprot.WriteFieldEnd(ctx) if err != nil { return } } - err = oprot.WriteFieldBegin("type", I32, 2) + err = oprot.WriteFieldBegin(ctx, "type", I32, 2) if err != nil { return } - err = oprot.WriteI32(p.type_) + err = oprot.WriteI32(ctx, p.type_) if err != nil { return } - err = oprot.WriteFieldEnd() + err = oprot.WriteFieldEnd(ctx) if err != nil { return } - err = oprot.WriteFieldStop() + err = oprot.WriteFieldStop(ctx) if err != nil { return } - err = oprot.WriteStructEnd() + err = oprot.WriteStructEnd(ctx) return } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go new file mode 100644 index 000000000..45c880d32 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go @@ -0,0 +1,555 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + cfg *TConfiguration + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTBinaryProtocolConf instead. 
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolConf instead. +func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { + PropagateTConfiguration(t, conf) + p := &TBinaryProtocol{ + origTransport: t, + cfg: conf, + } + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + return p +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. +func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{ + cfg: conf, + } +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocolConf(t, p.cfg) +} + +func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +/** + * Writing Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + if p.cfg.GetTBinaryStrictWrite() { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(ctx, int32(version)) + if e != nil { + return e + } + e = p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } else { + e := p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } + return nil +} + +func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + e := p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI16(ctx, id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { + e := p.WriteByte(ctx, STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + e := p.WriteByte(ctx, int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e 
+} + +func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { + if value { + return p.WriteByte(ctx, 1) + } + return p.WriteByte(ctx, 0) +} + +func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { + v := p.buffer[0:4] + binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.trans.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.WriteI64(ctx, int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.Write(value) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.cfg.GetTBinaryStrictRead() { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte(ctx) + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32(ctx) + if e4 != nil { + return name, typeId, seqId, e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name 
string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte(ctx) + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16(ctx) + } + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { + return nil +} + +var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) + +func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { + k, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return kType, vType, size, nil +} + +func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { + b, e := p.ReadByte(ctx) + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(ctx, buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(ctx, buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", e + } + err = checkSizeForProtocol(size, p.cfg) + if err != nil { + return + } + if size < 0 { + err = invalidDataLength + return + } + if size == 0 { + return "", nil + } + if size < int32(len(p.buffer)) { + // Avoid allocation on small reads + 
buf := p.buffer[:size] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + size, e := p.ReadI32(ctx) + if e != nil { + return nil, e + } + if err := checkSizeForProtocol(size, p.cfg); err != nil { + return nil, err + } + + buf, err := safeReadBytes(size, p.trans) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TBinaryProtocol) Transport() TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { + var read int + _, deadlineSet := ctx.Deadline() + for { + read, err = io.ReadFull(p.trans, buf) + if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout without anything read, + // and we still have time left, keep retrying. + continue + } + // For anything else, don't retry + break + } + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + buf, err := safeReadBytes(size, p.trans) + return string(buf), NewTProtocolException(err) +} + +func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) + _ TConfigurationSetter = (*TBinaryProtocol)(nil) +) + +// This function is shared between TBinaryProtocol and TCompactProtocol. +// +// It tries to read size bytes from trans, in a way that prevents large +// allocations when size is insanely large (mostly caused by malformed message). +func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { + if size < 0 { + return nil, nil + } + + buf := new(bytes.Buffer) + _, err := io.CopyN(buf, trans, int64(size)) + return buf.Bytes(), err +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go similarity index 90% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go index 96702061b..aa551b4ab 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go @@ -90,3 +90,10 @@ func (p *TBufferedTransport) Flush(ctx context.Context) error { func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { return p.tp.RemainingBytes() } + +// SetTConfiguration implements TConfigurationSetter for propagation. 
+func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.tp, conf) +} + +var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/client.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go similarity index 61% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/client.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go index b073a952d..ea2c01fda 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/client.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go @@ -5,8 +5,15 @@ import ( "fmt" ) +// ResponseMeta represents the metadata attached to the response. +type ResponseMeta struct { + // The headers in the response, if any. + // If the underlying transport/protocol is not THeader, this will always be nil. + Headers THeaderMap +} + type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) error + Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) } type TStandardClient struct { @@ -34,20 +41,20 @@ func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32 } } - if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil { + if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { return err } - if err := args.Write(oprot); err != nil { + if err := args.Write(ctx, oprot); err != nil { return err } - if err := oprot.WriteMessageEnd(); err != nil { + if err := oprot.WriteMessageEnd(ctx); err != nil { return err } return oprot.Flush(ctx) } -func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error { - rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin() +func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { + rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) if err != nil { return err } @@ -58,11 +65,11 @@ func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, resu return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) } else if rTypeId == EXCEPTION { var exception tApplicationException - if err := exception.Read(iprot); err != nil { + if err := exception.Read(ctx, iprot); err != nil { return err } - if err := iprot.ReadMessageEnd(); err != nil { + if err := iprot.ReadMessageEnd(ctx); err != nil { return err } @@ -71,25 +78,32 @@ func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, resu return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) } - if err := result.Read(iprot); err != nil { + if err := result.Read(ctx, iprot); err != nil { return err } - return iprot.ReadMessageEnd() + return iprot.ReadMessageEnd(ctx) } -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { +func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { p.seqId++ seqId := p.seqId if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { - return err + return ResponseMeta{}, err } // method is oneway if result == nil { - return nil + return ResponseMeta{}, nil } - return 
p.Recv(p.iprot, seqId, method, result) + err := p.Recv(ctx, p.iprot, seqId, method, result) + var headers THeaderMap + if hp, ok := p.iprot.(*THeaderProtocol); ok { + headers = hp.transport.readHeaders + } + return ResponseMeta{ + Headers: headers, + }, err } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go similarity index 73% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go index 1900d50c3..a49225dab 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go @@ -22,6 +22,7 @@ package thrift import ( "context" "encoding/binary" + "errors" "fmt" "io" "math" @@ -74,20 +75,37 @@ func init() { } } -type TCompactProtocolFactory struct{} +type TCompactProtocolFactory struct { + cfg *TConfiguration +} +// Deprecated: Use NewTCompactProtocolFactoryConf instead. func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return &TCompactProtocolFactory{} + return NewTCompactProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { + return &TCompactProtocolFactory{ + cfg: conf, + } } func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocol(trans) + return NewTCompactProtocolConf(trans, p.cfg) +} + +func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf } type TCompactProtocol struct { trans TRichTransport origTransport TTransport + cfg *TConfiguration + // Used to keep track of the last field for the current and previous structs, // so we can do the delta stuff. lastField []int @@ -106,9 +124,19 @@ type TCompactProtocol struct { buffer [64]byte } -// Create a TCompactProtocol given a TTransport +// Deprecated: Use NewTCompactProtocolConf instead. func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - p := &TCompactProtocol{origTransport: trans, lastField: []int{}} + return NewTCompactProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { + PropagateTConfiguration(trans, conf) + p := &TCompactProtocol{ + origTransport: trans, + cfg: conf, + } if et, ok := trans.(TRichTransport); ok { p.trans = et } else { @@ -116,7 +144,6 @@ func NewTCompactProtocol(trans TTransport) *TCompactProtocol { } return p - } // @@ -125,7 +152,7 @@ func NewTCompactProtocol(trans TTransport) *TCompactProtocol { // Write a message header to the wire. Compact Protocol messages contain the // protocol version so we can migrate forwards in the future if need be. 
-func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { +func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { err := p.writeByteDirect(COMPACT_PROTOCOL_ID) if err != nil { return NewTProtocolException(err) @@ -138,17 +165,17 @@ func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, s if err != nil { return NewTProtocolException(err) } - e := p.WriteString(name) + e := p.WriteString(ctx, name) return e } -func (p *TCompactProtocol) WriteMessageEnd() error { return nil } +func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } // Write a struct begin. This doesn't actually put anything on the wire. We // use it as an opportunity to put special placeholder markers on the field // stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(name string) error { +func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { p.lastField = append(p.lastField, p.lastFieldId) p.lastFieldId = 0 return nil @@ -157,26 +184,29 @@ func (p *TCompactProtocol) WriteStructBegin(name string) error { // Write a struct end. This doesn't actually put anything on the wire. We use // this as an opportunity to pop the last field from the current struct off // of the field stack. -func (p *TCompactProtocol) WriteStructEnd() error { +func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) + } p.lastFieldId = p.lastField[len(p.lastField)-1] p.lastField = p.lastField[:len(p.lastField)-1] return nil } -func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { +func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { if typeId == BOOL { // we want to possibly include the value, so we'll wait. p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true return nil } - _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) + _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) return NewTProtocolException(err) } // The workhorse of writeFieldBegin. It has the option of doing a // 'type override' of the type header. This is used specifically in the // boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { +func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { // short lastField = lastField_.pop(); // if there's a type override, use that. 
@@ -201,7 +231,7 @@ func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id if err != nil { return 0, err } - err = p.WriteI16(id) + err = p.WriteI16(ctx, id) written = 1 + 2 if err != nil { return 0, err @@ -209,18 +239,17 @@ func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id } p.lastFieldId = fieldId - // p.lastField.Push(field.id); return written, nil } -func (p *TCompactProtocol) WriteFieldEnd() error { return nil } +func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } -func (p *TCompactProtocol) WriteFieldStop() error { +func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { err := p.writeByteDirect(STOP) return NewTProtocolException(err) } -func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { +func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { if size == 0 { err := p.writeByteDirect(0) return NewTProtocolException(err) @@ -233,32 +262,32 @@ func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size in return NewTProtocolException(err) } -func (p *TCompactProtocol) WriteMapEnd() error { return nil } +func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } // Write a list header. -func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { +func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { _, err := p.writeCollectionBegin(elemType, size) return NewTProtocolException(err) } -func (p *TCompactProtocol) WriteListEnd() error { return nil } +func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } // Write a set header. -func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { +func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { _, err := p.writeCollectionBegin(elemType, size) return NewTProtocolException(err) } -func (p *TCompactProtocol) WriteSetEnd() error { return nil } +func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } -func (p *TCompactProtocol) WriteBool(value bool) error { +func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { v := byte(COMPACT_BOOLEAN_FALSE) if value { v = byte(COMPACT_BOOLEAN_TRUE) } if p.booleanFieldPending { // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) + _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) p.booleanFieldPending = false return NewTProtocolException(err) } @@ -268,31 +297,31 @@ func (p *TCompactProtocol) WriteBool(value bool) error { } // Write a byte. Nothing to see here! -func (p *TCompactProtocol) WriteByte(value int8) error { +func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { err := p.writeByteDirect(byte(value)) return NewTProtocolException(err) } // Write an I16 as a zigzag varint. -func (p *TCompactProtocol) WriteI16(value int16) error { +func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) return NewTProtocolException(err) } // Write an i32 as a zigzag varint. 
-func (p *TCompactProtocol) WriteI32(value int32) error { +func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { _, err := p.writeVarint32(p.int32ToZigzag(value)) return NewTProtocolException(err) } // Write an i64 as a zigzag varint. -func (p *TCompactProtocol) WriteI64(value int64) error { +func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { _, err := p.writeVarint64(p.int64ToZigzag(value)) return NewTProtocolException(err) } // Write a double to the wire as 8 bytes. -func (p *TCompactProtocol) WriteDouble(value float64) error { +func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { buf := p.buffer[0:8] binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) _, err := p.trans.Write(buf) @@ -300,7 +329,7 @@ func (p *TCompactProtocol) WriteDouble(value float64) error { } // Write a string to the wire with a varint size preceding. -func (p *TCompactProtocol) WriteString(value string) error { +func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { _, e := p.writeVarint32(int32(len(value))) if e != nil { return NewTProtocolException(e) @@ -312,7 +341,7 @@ func (p *TCompactProtocol) WriteString(value string) error { } // Write a byte array, using a varint for the size. -func (p *TCompactProtocol) WriteBinary(bin []byte) error { +func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { _, e := p.writeVarint32(int32(len(bin))) if e != nil { return NewTProtocolException(e) @@ -329,9 +358,20 @@ func (p *TCompactProtocol) WriteBinary(bin []byte) error { // // Read a message header. -func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { +func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + var protocolId byte - protocolId, err := p.readByteDirect() + _, deadlineSet := ctx.Deadline() + for { + protocolId, err = p.readByteDirect() + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // keep retrying I/O timeout errors since we still have + // time left + continue + } + // For anything else, don't retry + break + } if err != nil { return } @@ -358,15 +398,15 @@ func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, err = NewTProtocolException(e) return } - name, err = p.ReadString() + name, err = p.ReadString(ctx) return } -func (p *TCompactProtocol) ReadMessageEnd() error { return nil } +func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } // Read a struct begin. There's nothing on the wire for this, but it is our // opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { +func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { p.lastField = append(p.lastField, p.lastFieldId) p.lastFieldId = 0 return @@ -374,15 +414,18 @@ func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { // Doesn't actually consume any wire data, just removes the last field for // this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd() error { +func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { // consume the last field we read off the wire. 
+ if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) + } p.lastFieldId = p.lastField[len(p.lastField)-1] p.lastField = p.lastField[:len(p.lastField)-1] return nil } // Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { +func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { t, err := p.readByteDirect() if err != nil { return @@ -397,7 +440,7 @@ func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16 modifier := int16((t & 0xf0) >> 4) if modifier == 0 { // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16() + id, err = p.ReadI16(ctx) if err != nil { return } @@ -423,12 +466,12 @@ func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16 return } -func (p *TCompactProtocol) ReadFieldEnd() error { return nil } +func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil } // Read a map header off the wire. If the size is zero, skip reading the key // and value type. This means that 0-length maps will yield TMaps without the // "correct" types. -func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { +func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { size32, e := p.readVarint32() if e != nil { err = NewTProtocolException(e) @@ -452,13 +495,13 @@ func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size return } -func (p *TCompactProtocol) ReadMapEnd() error { return nil } +func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil } // Read a list header off the wire. If the list size is 0-14, the size will // be packed into the element type header. If it's a longer list, the 4 MSB // of the element type header will be 0xF, and a varint will follow with the // true size. -func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { +func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { size_and_type, err := p.readByteDirect() if err != nil { return @@ -484,22 +527,22 @@ func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) return } -func (p *TCompactProtocol) ReadListEnd() error { return nil } +func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil } // Read a set header off the wire. If the set size is 0-14, the size will // be packed into the element type header. If it's a longer set, the 4 MSB // of the element type header will be 0xF, and a varint will follow with the // true size. -func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { - return p.ReadListBegin() +func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.ReadListBegin(ctx) } -func (p *TCompactProtocol) ReadSetEnd() error { return nil } +func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil } // Read a boolean off the wire. If this is a boolean field, the value should // already have been read during readFieldBegin, so we'll just consume the // pre-stored value. Otherwise, read a byte. 
-func (p *TCompactProtocol) ReadBool() (value bool, err error) { +func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) { if p.boolValueIsNotNull { p.boolValueIsNotNull = false return p.boolValue, nil @@ -509,7 +552,7 @@ func (p *TCompactProtocol) ReadBool() (value bool, err error) { } // Read a single byte off the wire. Nothing interesting here. -func (p *TCompactProtocol) ReadByte() (int8, error) { +func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { v, err := p.readByteDirect() if err != nil { return 0, NewTProtocolException(err) @@ -518,13 +561,13 @@ func (p *TCompactProtocol) ReadByte() (int8, error) { } // Read an i16 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI16() (value int16, err error) { - v, err := p.ReadI32() +func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { + v, err := p.ReadI32(ctx) return int16(v), err } // Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32() (value int32, err error) { +func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { v, e := p.readVarint32() if e != nil { return 0, NewTProtocolException(e) @@ -534,7 +577,7 @@ func (p *TCompactProtocol) ReadI32() (value int32, err error) { } // Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64() (value int64, err error) { +func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { v, e := p.readVarint64() if e != nil { return 0, NewTProtocolException(e) @@ -544,7 +587,7 @@ func (p *TCompactProtocol) ReadI64() (value int64, err error) { } // No magic here - just read a double off the wire. -func (p *TCompactProtocol) ReadDouble() (value float64, err error) { +func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { longBits := p.buffer[0:8] _, e := io.ReadFull(p.trans, longBits) if e != nil { @@ -554,43 +597,44 @@ func (p *TCompactProtocol) ReadDouble() (value float64, err error) { } // Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString() (value string, err error) { +func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { length, e := p.readVarint32() if e != nil { return "", NewTProtocolException(e) } - if length < 0 { - return "", invalidDataLength + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return } - if length == 0 { return "", nil } - var buf []byte - if length <= int32(len(p.buffer)) { - buf = p.buffer[0:length] - } else { - buf = make([]byte, length) + if length < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:length] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) } - _, e = io.ReadFull(p.trans, buf) + + buf, e := safeReadBytes(length, p.trans) return string(buf), NewTProtocolException(e) } // Read a []byte from the wire. 
-func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { +func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { length, e := p.readVarint32() if e != nil { return nil, NewTProtocolException(e) } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } if length == 0 { return []byte{}, nil } - if length < 0 { - return nil, invalidDataLength - } - buf := make([]byte, length) - _, e = io.ReadFull(p.trans, buf) + buf, e := safeReadBytes(length, p.trans) return buf, NewTProtocolException(e) } @@ -598,8 +642,8 @@ func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { return NewTProtocolException(p.trans.Flush(ctx)) } -func (p *TCompactProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) +func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) } func (p *TCompactProtocol) Transport() TTransport { @@ -801,10 +845,21 @@ func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { case COMPACT_STRUCT: return STRUCT, nil } - return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f)) + return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) } // Given a TType value, find the appropriate TCompactProtocol.Types constant. func (p *TCompactProtocol) getCompactType(t TType) tCompactType { return ttypeToCompactType[t] } + +func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) + _ TConfigurationSetter = (*TCompactProtocol)(nil) +) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go new file mode 100644 index 000000000..454d9f377 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "fmt" + "time" +) + +// Default TConfiguration values. +const ( + DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 + DEFAULT_MAX_FRAME_SIZE = 16384000 + + DEFAULT_TBINARY_STRICT_READ = false + DEFAULT_TBINARY_STRICT_WRITE = true + + DEFAULT_CONNECT_TIMEOUT = 0 + DEFAULT_SOCKET_TIMEOUT = 0 +) + +// TConfiguration defines some configurations shared between TTransport, +// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. 
+// +// When constructing TConfiguration, you only need to specify the non-default +// fields. All zero values have sane default values. +// +// Not all configurations defined are applicable to all implementations. +// Implementations are free to ignore the configurations not applicable to them. +// +// All functions attached to this type are nil-safe. +// +// See [1] for spec. +// +// NOTE: When using TConfiguration, fill in all the configurations you want to +// set across the stack, not only the ones you want to set in the immediate +// TTransport/TProtocol. +// +// For example, say you want to migrate this old code into using TConfiguration: +// +// sccket := thrift.NewTSocketTimeout("host:port", time.Second) +// transFactory := thrift.NewTFramedTransportFactoryMaxLength( +// thrift.NewTTransportFactory(), +// 1024 * 1024 * 256, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) +// +// This is the wrong way to do it because in the end the TConfiguration used by +// socket and transFactory will be overwritten by the one used by protoFactory +// because of TConfiguration propagation: +// +// // bad example, DO NOT USE +// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// }) +// transFactory := thrift.NewTFramedTransportFactoryConf( +// thrift.NewTTransportFactory(), +// &thrift.TConfiguration{ +// MaxFrameSize: 1024 * 1024 * 256, +// }, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// }) +// +// This is the correct way to do it: +// +// conf := &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// +// MaxFrameSize: 1024 * 1024 * 256, +// +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// } +// sccket := thrift.NewTSocketConf("host:port", conf) +// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) +// +// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md +type TConfiguration struct { + // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. + MaxMessageSize int32 + + // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. + // + // Also if MaxMessageSize < MaxFrameSize, + // MaxMessageSize will be used instead. + MaxFrameSize int32 + + // Connect and socket timeouts to be used by TSocket and TSSLSocket. + // + // 0 means no timeout. + // + // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be + // used. + ConnectTimeout time.Duration + SocketTimeout time.Duration + + // TLS config to be used by TSSLSocket. + TLSConfig *tls.Config + + // Strict read/write configurations for TBinaryProtocol. + // + // BoolPtr helper function is available to use literal values. + TBinaryStrictRead *bool + TBinaryStrictWrite *bool + + // The wrapped protocol id to be used in THeader transport/protocol. + // + // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions + // are provided to help filling this value. + THeaderProtocolID *THeaderProtocolID + + // Used internally by deprecated constructors, to avoid overriding + // underlying TTransport/TProtocol's cfg by accidental propagations. + // + // For external users this is always false. 
+ noPropagation bool +} + +// GetMaxMessageSize returns the max message size an implementation should +// follow. +// +// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil. +func (tc *TConfiguration) GetMaxMessageSize() int32 { + if tc == nil || tc.MaxMessageSize <= 0 { + return DEFAULT_MAX_MESSAGE_SIZE + } + return tc.MaxMessageSize +} + +// GetMaxFrameSize returns the max frame size an implementation should follow. +// +// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil. +// +// If the configured max message size is smaller than the configured max frame +// size, the smaller one will be returned instead. +func (tc *TConfiguration) GetMaxFrameSize() int32 { + if tc == nil { + return DEFAULT_MAX_FRAME_SIZE + } + maxFrameSize := tc.MaxFrameSize + if maxFrameSize <= 0 { + maxFrameSize = DEFAULT_MAX_FRAME_SIZE + } + if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize { + return maxMessageSize + } + return maxFrameSize +} + +// GetConnectTimeout returns the connect timeout that should be used by TSocket and +// TSSLSocket. +// +// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead. +func (tc *TConfiguration) GetConnectTimeout() time.Duration { + if tc == nil || tc.ConnectTimeout < 0 { + return DEFAULT_CONNECT_TIMEOUT + } + return tc.ConnectTimeout +} + +// GetSocketTimeout returns the socket timeout that should be used by TSocket and +// TSSLSocket. +// +// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead. +func (tc *TConfiguration) GetSocketTimeout() time.Duration { + if tc == nil || tc.SocketTimeout < 0 { + return DEFAULT_SOCKET_TIMEOUT + } + return tc.SocketTimeout +} + +// GetTLSConfig returns the TLS config that should be used by TSSLSocket. +// +// It's nil-safe. If tc is nil, nil will be returned instead. +func (tc *TConfiguration) GetTLSConfig() *tls.Config { + if tc == nil { + return nil + } + return tc.TLSConfig +} + +// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol +// should follow. +// +// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or +// tc.TBinaryStrictRead is nil. +func (tc *TConfiguration) GetTBinaryStrictRead() bool { + if tc == nil || tc.TBinaryStrictRead == nil { + return DEFAULT_TBINARY_STRICT_READ + } + return *tc.TBinaryStrictRead +} + +// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol +// should follow. +// +// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or +// tc.TBinaryStrictWrite is nil. +func (tc *TConfiguration) GetTBinaryStrictWrite() bool { + if tc == nil || tc.TBinaryStrictWrite == nil { + return DEFAULT_TBINARY_STRICT_WRITE + } + return *tc.TBinaryStrictWrite +} + +// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by +// THeaderProtocol clients (for servers, they always use the same one as the +// client instead). +// +// It's nil-safe. If either tc or tc.THeaderProtocolID is nil, +// THeaderProtocolDefault will be returned instead. +// THeaderProtocolDefault will also be returned if the configured value is invalid. +func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { + if tc == nil || tc.THeaderProtocolID == nil { + return THeaderProtocolDefault + } + protoID := *tc.THeaderProtocolID + if err := protoID.Validate(); err != nil { + return THeaderProtocolDefault + } + return protoID +} + +// THeaderProtocolIDPtr validates and returns the pointer to id.
+// +// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault +// and the validation error will be returned. +func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) { + err := id.Validate() + if err != nil { + id = THeaderProtocolDefault + } + return &id, err +} + +// THeaderProtocolIDPtrMust validates and returns the pointer to id. +// +// It's similar to THeaderProtocolIDPtr, but it panics on validation errors +// instead of returning them. +func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID { + ptr, err := THeaderProtocolIDPtr(id) + if err != nil { + panic(err) + } + return ptr +} + +// TConfigurationSetter is an optional interface that TProtocol, TTransport, +// TProtocolFactory, TTransportFactory, and other implementations can implement. +// +// It's intended to be called during initialization. +// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the +// middle of a message is undefined: +// It may or may not change the behavior of the message currently being processed, +// and it may even cause the current message to fail. +// +// Note for implementations: SetTConfiguration might be called multiple times +// with the same value in quick succession due to the implementation of the +// propagation. Implementations should make SetTConfiguration as simple as +// possible (usually just overwrite the stored configuration and propagate it to +// the wrapped TTransports/TProtocols). +type TConfigurationSetter interface { + SetTConfiguration(*TConfiguration) +} + +// PropagateTConfiguration propagates cfg to impl if impl implements +// TConfigurationSetter and cfg is non-nil, otherwise it does nothing. +// +// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration +// with everything being default value, use &TConfiguration{} explicitly instead. +func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { + if cfg == nil || cfg.noPropagation { + return + } + + if setter, ok := impl.(TConfigurationSetter); ok { + setter.SetTConfiguration(cfg) + } +} + +func checkSizeForProtocol(size int32, cfg *TConfiguration) error { + if size < 0 { + return NewTProtocolExceptionWithType( + NEGATIVE_SIZE, + fmt.Errorf("negative size: %d", size), + ) + } + if size > cfg.GetMaxMessageSize() { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + fmt.Errorf("size exceeded max allowed: %d", size), + ) + } + return nil +} + +type tTransportFactoryConf struct { + delegate TTransportFactory + cfg *TConfiguration +} + +func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { + trans, err := f.delegate.GetTransport(orig) + if err == nil { + PropagateTConfiguration(orig, f.cfg) + PropagateTConfiguration(trans, f.cfg) + } + return trans, err +} + +func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TTransportFactoryConf wraps a TTransportFactory to propagate +// TConfiguration on the factory's GetTransport calls.
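+//
+// Illustrative sketch (assumes the package's stock NewTTransportFactory
+// passthrough factory, as used in the TConfiguration examples above): wrap a
+// factory so that every transport it returns receives the shared configuration.
+//
+//     conf := &thrift.TConfiguration{MaxFrameSize: 1024 * 1024}
+//     transFactory := thrift.TTransportFactoryConf(thrift.NewTTransportFactory(), conf)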
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { + return &tTransportFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +type tProtocolFactoryConf struct { + delegate TProtocolFactory + cfg *TConfiguration +} + +func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { + proto := f.delegate.GetProtocol(trans) + PropagateTConfiguration(trans, f.cfg) + PropagateTConfiguration(proto, f.cfg) + return proto +} + +func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TProtocolFactoryConf wraps a TProtocolFactory to propagate +// TConfiguration on the factory's GetProtocol calls. +func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { + return &tProtocolFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +var ( + _ TConfigurationSetter = (*tTransportFactoryConf)(nil) + _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) +) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/context.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/context.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go new file mode 100644 index 000000000..fdf9bfec1 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go @@ -0,0 +1,447 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" +) + +type TDebugProtocol struct { + // Required. The actual TProtocol to do the read/write. + Delegate TProtocol + + // Optional. The logger and prefix to log all the args/return values + // from Delegate TProtocol calls. + // + // If Logger is nil, StdLogger using stdlib log package with os.Stderr + // will be used. If disable logging is desired, set Logger to NopLogger + // explicitly instead of leaving it as nil/unset. + Logger Logger + LogPrefix string + + // Optional. An TProtocol to duplicate everything read/written from Delegate. + // + // A typical use case of this is to use TSimpleJSONProtocol wrapping + // TMemoryBuffer in a middleware to json logging requests/responses. + // + // This feature is not available from TDebugProtocolFactory. 
In order to + // use it you have to construct TDebugProtocol directly, or set DuplicateTo + // field after getting a TDebugProtocol from the factory. + DuplicateTo TProtocol +} + +type TDebugProtocolFactory struct { + Underlying TProtocolFactory + LogPrefix string + Logger Logger +} + +// NewTDebugProtocolFactory creates a TDebugProtocolFactory. +// +// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct +// itself instead. This version will use the default logger from standard +// library. +func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: StdLogger(nil), + } +} + +// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory. +func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: logger, + } +} + +func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return &TDebugProtocol{ + Delegate: t.Underlying.GetProtocol(trans), + LogPrefix: t.LogPrefix, + Logger: fallbackLogger(t.Logger), + } +} + +func (tdp *TDebugProtocol) logf(format string, v ...interface{}) { + fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...)) +} + +func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) + tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return err +} +func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMessageEnd(ctx) + tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error { + err := tdp.Delegate.WriteStructBegin(ctx, name) + tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return err +} +func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { + err := tdp.Delegate.WriteStructEnd(ctx) + tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) + tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { + err := tdp.Delegate.WriteFieldEnd(ctx) + tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { + err := tdp.Delegate.WriteFieldStop(ctx) + tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldStop(ctx) + } + return err +} +func 
(tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) + tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMapEnd(ctx) + tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteListBegin(ctx, elemType, size) + tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { + err := tdp.Delegate.WriteListEnd(ctx) + tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) + tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { + err := tdp.Delegate.WriteSetEnd(ctx) + tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { + err := tdp.Delegate.WriteBool(ctx, value) + tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { + err := tdp.Delegate.WriteByte(ctx, value) + tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { + err := tdp.Delegate.WriteI16(ctx, value) + tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { + err := tdp.Delegate.WriteI32(ctx, value) + tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { + err := tdp.Delegate.WriteI64(ctx, value) + tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { + err := tdp.Delegate.WriteDouble(ctx, value) + tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, 
value) + } + return err +} +func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { + err := tdp.Delegate.WriteString(ctx, value) + tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { + err := tdp.Delegate.WriteBinary(ctx, value) + tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return err +} + +func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) + tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return +} +func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMessageEnd(ctx) + tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + name, err = tdp.Delegate.ReadStructBegin(ctx) + tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return +} +func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadStructEnd(ctx) + tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) + tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return +} +func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadFieldEnd(ctx) + tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) + tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return +} +func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMapEnd(ctx) + tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadListBegin(ctx) + tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return +} +func (tdp 
*TDebugProtocol) ReadListEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadListEnd(ctx) + tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) + tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadSetEnd(ctx) + tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { + value, err = tdp.Delegate.ReadBool(ctx) + tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { + value, err = tdp.Delegate.ReadByte(ctx) + tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) { + value, err = tdp.Delegate.ReadI16(ctx) + tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { + value, err = tdp.Delegate.ReadI32(ctx) + tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { + value, err = tdp.Delegate.ReadI64(ctx) + tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + value, err = tdp.Delegate.ReadDouble(ctx) + tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) { + value, err = tdp.Delegate.ReadString(ctx) + tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + value, err = tdp.Delegate.ReadBinary(ctx) + tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return +} +func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + err = tdp.Delegate.Skip(ctx, fieldType) + tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.Skip(ctx, fieldType) + } + return +} +func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { + err = 
tdp.Delegate.Flush(ctx) + tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.Flush(ctx) + } + return +} + +func (tdp *TDebugProtocol) Transport() TTransport { + return tdp.Delegate.Transport() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(tdp.Delegate, conf) + PropagateTConfiguration(tdp.DuplicateTo, conf) +} + +var _ TConfigurationSetter = (*TDebugProtocol)(nil) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go new file mode 100644 index 000000000..cefc7ecda --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "sync" +) + +type TDeserializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +func NewTDeserializer() *TDeserializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TDeserializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) { + t.Transport.Reset() + + err = nil + if _, err = t.Transport.Write([]byte(s)); err != nil { + return + } + if err = msg.Read(ctx, t.Protocol); err != nil { + return + } + return +} + +func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) { + t.Transport.Reset() + + err = nil + if _, err = t.Transport.Write(b); err != nil { + return + } + if err = msg.Read(ctx, t.Protocol); err != nil { + return + } + return +} + +// TDeserializerPool is the thread-safe version of TDeserializer, +// it uses resource pool of TDeserializer under the hood. +// +// It must be initialized with either NewTDeserializerPool or +// NewTDeserializerPoolSizeFactory. +type TDeserializerPool struct { + pool sync.Pool +} + +// NewTDeserializerPool creates a new TDeserializerPool. +// +// NewTDeserializer can be used as the arg here. +func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool { + return &TDeserializerPool{ + pool: sync.Pool{ + New: func() interface{} { + return f() + }, + }, + } +} + +// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with +// the given size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size. 
+func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool { + return &TDeserializerPool{ + pool: sync.Pool{ + New: func() interface{} { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TDeserializer{ + Transport: transport, + Protocol: protocol, + } + }, + }, + } +} + +func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error { + d := t.pool.Get().(*TDeserializer) + defer t.pool.Put(d) + return d.ReadString(ctx, msg, s) +} + +func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error { + d := t.pool.Get().(*TDeserializer) + defer t.pool.Put(d) + return d.Read(ctx, msg, b) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go new file mode 100644 index 000000000..53bf862ea --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" +) + +// Generic Thrift exception +type TException interface { + error + + TExceptionType() TExceptionType +} + +// Prepends additional information to an error without losing the Thrift exception interface +func PrependError(prepend string, err error) error { + msg := prepend + err.Error() + + var te TException + if errors.As(err, &te) { + switch te.TExceptionType() { + case TExceptionTypeTransport: + if t, ok := err.(TTransportException); ok { + return prependTTransportException(prepend, t) + } + case TExceptionTypeProtocol: + if t, ok := err.(TProtocolException); ok { + return prependTProtocolException(prepend, t) + } + case TExceptionTypeApplication: + var t TApplicationException + if errors.As(err, &t) { + return NewTApplicationException(t.TypeId(), msg) + } + } + + return wrappedTException{ + err: err, + msg: msg, + tExceptionType: te.TExceptionType(), + } + } + + return errors.New(msg) +} + +// TExceptionType is an enum type to categorize different "subclasses" of TExceptions. +type TExceptionType byte + +// TExceptionType values +const ( + TExceptionTypeUnknown TExceptionType = iota + TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler + TExceptionTypeApplication // TApplicationExceptions + TExceptionTypeProtocol // TProtocolExceptions + TExceptionTypeTransport // TTransportExceptions +) + +// WrapTException wraps an error into TException. +// +// If err is nil or already TException, it's returned as-is. 
+// Otherwise it will be wraped into TException with TExceptionType() returning +// TExceptionTypeUnknown, and Unwrap() returning the original error. +func WrapTException(err error) TException { + if err == nil { + return nil + } + + if te, ok := err.(TException); ok { + return te + } + + return wrappedTException{ + err: err, + msg: err.Error(), + tExceptionType: TExceptionTypeUnknown, + } +} + +type wrappedTException struct { + err error + msg string + tExceptionType TExceptionType +} + +func (w wrappedTException) Error() string { + return w.msg +} + +func (w wrappedTException) TExceptionType() TExceptionType { + return w.tExceptionType +} + +func (w wrappedTException) Unwrap() error { + return w.err +} + +var _ TException = wrappedTException{} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go new file mode 100644 index 000000000..f683e7f54 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" +) + +// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. +const DEFAULT_MAX_LENGTH = 16384000 + +type TFramedTransport struct { + transport TTransport + + cfg *TConfiguration + + writeBuf bytes.Buffer + + reader *bufio.Reader + readBuf bytes.Buffer + + buffer [4]byte +} + +type tFramedTransportFactory struct { + factory TTransportFactory + cfg *TConfiguration +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. +func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. 
+func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + PropagateTConfiguration(factory, conf) + return &tFramedTransportFactory{ + factory: factory, + cfg: conf, + } +} + +func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { + PropagateTConfiguration(base, p.cfg) + tt, err := p.factory.GetTransport(base) + if err != nil { + return nil, err + } + return NewTFramedTransportConf(tt, p.cfg), nil +} + +func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.factory, cfg) + p.cfg = cfg +} + +// Deprecated: Use NewTFramedTransportConf instead. +func NewTFramedTransport(transport TTransport) *TFramedTransport { + return NewTFramedTransportConf(transport, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportConf instead. +func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { + return NewTFramedTransportConf(transport, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport { + PropagateTConfiguration(transport, conf) + return &TFramedTransport{ + transport: transport, + reader: bufio.NewReader(transport), + cfg: conf, + } +} + +func (p *TFramedTransport) Open() error { + return p.transport.Open() +} + +func (p *TFramedTransport) IsOpen() bool { + return p.transport.IsOpen() +} + +func (p *TFramedTransport) Close() error { + return p.transport.Close() +} + +func (p *TFramedTransport) Read(buf []byte) (read int, err error) { + read, err = p.readBuf.Read(buf) + if err != io.EOF { + return + } + + // For bytes.Buffer.Read, EOF would only happen when read is zero, + // but still, do a sanity check, + // in case that behavior is changed in a future version of go stdlib. + // When that happens, just return nil error, + // and let the caller call Read again to read the next frame. + if read > 0 { + return read, nil + } + + // Reaching here means that the last Read finished the last frame, + // so we need to read the next frame into readBuf now. 
+ if err = p.readFrame(); err != nil { + return read, err + } + newRead, err := p.Read(buf[read:]) + return read + newRead, err +} + +func (p *TFramedTransport) ReadByte() (c byte, err error) { + buf := p.buffer[:1] + _, err = p.Read(buf) + if err != nil { + return + } + c = buf[0] + return +} + +func (p *TFramedTransport) Write(buf []byte) (int, error) { + n, err := p.writeBuf.Write(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) WriteByte(c byte) error { + return p.writeBuf.WriteByte(c) +} + +func (p *TFramedTransport) WriteString(s string) (n int, err error) { + return p.writeBuf.WriteString(s) +} + +func (p *TFramedTransport) Flush(ctx context.Context) error { + size := p.writeBuf.Len() + buf := p.buffer[:4] + binary.BigEndian.PutUint32(buf, uint32(size)) + _, err := p.transport.Write(buf) + if err != nil { + p.writeBuf.Reset() + return NewTTransportExceptionFromError(err) + } + if size > 0 { + if _, err := io.Copy(p.transport, &p.writeBuf); err != nil { + p.writeBuf.Reset() + return NewTTransportExceptionFromError(err) + } + } + err = p.transport.Flush(ctx) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) readFrame() error { + buf := p.buffer[:4] + if _, err := io.ReadFull(p.reader, buf); err != nil { + return err + } + size := binary.BigEndian.Uint32(buf) + if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) { + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) + } + _, err := io.CopyN(&p.readBuf, p.reader, int64(size)) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { + return uint64(p.readBuf.Len()) +} + +// SetTConfiguration implements TConfigurationSetter. +func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tFramedTransportFactory)(nil) + _ TConfigurationSetter = (*TFramedTransport)(nil) +) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go similarity index 93% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go index 21e880d66..ac9bd4882 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go @@ -44,6 +44,15 @@ func SetHeader(ctx context.Context, key, value string) context.Context { ) } +// UnsetHeader unsets a previously set header in the context. +func UnsetHeader(ctx context.Context, key string) context.Context { + return context.WithValue( + ctx, + headerKey(key), + nil, + ) +} + // GetHeader returns a value of the given header from the context. 
func GetHeader(ctx context.Context, key string) (value string, ok bool) { if v := ctx.Value(headerKey(key)); v != nil { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go new file mode 100644 index 000000000..878041f8d --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go @@ -0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" +) + +// THeaderProtocol is a thrift protocol that implements THeader: +// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md +// +// It supports either binary or compact protocol as the wrapped protocol. +// +// Most of the THeader handlings are happening inside THeaderTransport. +type THeaderProtocol struct { + transport *THeaderTransport + + // Will be initialized on first read/write. + protocol TProtocol + + cfg *TConfiguration +} + +// Deprecated: Use NewTHeaderProtocolConf instead. +func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { + return newTHeaderProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying +// transport with given TConfiguration. +// +// The passed in transport will be wrapped with THeaderTransport. +// +// Note that THeaderTransport handles frame and zlib by itself, +// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), +// instead of rich transports like TZlibTransport or TFramedTransport. +func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol { + return newTHeaderProtocolConf(trans, conf) +} + +func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol { + t := NewTHeaderTransportConf(trans, cfg) + p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t) + PropagateTConfiguration(p, cfg) + return &THeaderProtocol{ + transport: t, + protocol: p, + cfg: cfg, + } +} + +type tHeaderProtocolFactory struct { + cfg *TConfiguration +} + +func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return newTHeaderProtocolConf(trans, f.cfg) +} + +func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) { + f.cfg = cfg +} + +// Deprecated: Use NewTHeaderProtocolFactoryConf instead. 
+func NewTHeaderProtocolFactory() TProtocolFactory { + return NewTHeaderProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderProtocolFactoryConf creates a factory for THeader with given +// TConfiguration. +func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory { + return tHeaderProtocolFactory{ + cfg: conf, + } +} + +// Transport returns the underlying transport. +// +// It's guaranteed to be of type *THeaderTransport. +func (p *THeaderProtocol) Transport() TTransport { + return p.transport +} + +// GetReadHeaders returns the THeaderMap read from transport. +func (p *THeaderProtocol) GetReadHeaders() THeaderMap { + return p.transport.GetReadHeaders() +} + +// SetWriteHeader sets a header for write. +func (p *THeaderProtocol) SetWriteHeader(key, value string) { + p.transport.SetWriteHeader(key, value) +} + +// ClearWriteHeaders clears all write headers previously set. +func (p *THeaderProtocol) ClearWriteHeaders() { + p.transport.ClearWriteHeaders() +} + +// AddTransform add a transform for writing. +func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { + return p.transport.AddTransform(transform) +} + +func (p *THeaderProtocol) Flush(ctx context.Context) error { + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { + newProto, err := p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + return err + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + p.transport.SequenceID = seqID + return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) +} + +func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { + if err := p.protocol.WriteMessageEnd(ctx); err != nil { + return err + } + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { + return p.protocol.WriteStructBegin(ctx, name) +} + +func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { + return p.protocol.WriteStructEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { + return p.protocol.WriteFieldBegin(ctx, name, typeID, id) +} + +func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { + return p.protocol.WriteFieldEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { + return p.protocol.WriteFieldStop(ctx) +} + +func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) +} + +func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { + return p.protocol.WriteMapEnd(ctx) +} + +func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteListBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { + return p.protocol.WriteListEnd(ctx) +} + +func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteSetBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { + return p.protocol.WriteSetEnd(ctx) +} + +func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { + return p.protocol.WriteBool(ctx, value) +} + +func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error 
{ + return p.protocol.WriteByte(ctx, value) +} + +func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { + return p.protocol.WriteI16(ctx, value) +} + +func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { + return p.protocol.WriteI32(ctx, value) +} + +func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { + return p.protocol.WriteI64(ctx, value) +} + +func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.protocol.WriteDouble(ctx, value) +} + +func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { + return p.protocol.WriteString(ctx, value) +} + +func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { + return p.protocol.WriteBinary(ctx, value) +} + +// ReadFrame calls underlying THeaderTransport's ReadFrame function. +func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { + return p.transport.ReadFrame(ctx) +} + +func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { + if err = p.transport.ReadFrame(ctx); err != nil { + return + } + + var newProto TProtocol + newProto, err = p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + var tAppExc TApplicationException + if !errors.As(err, &tAppExc) { + return + } + if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { + return + } + if e := tAppExc.Write(ctx, p.protocol); e != nil { + return + } + if e := p.protocol.WriteMessageEnd(ctx); e != nil { + return + } + if e := p.transport.Flush(ctx); e != nil { + return + } + return + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + + return p.protocol.ReadMessageBegin(ctx) +} + +func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { + return p.protocol.ReadMessageEnd(ctx) +} + +func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return p.protocol.ReadStructBegin(ctx) +} + +func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { + return p.protocol.ReadStructEnd(ctx) +} + +func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { + return p.protocol.ReadFieldBegin(ctx) +} + +func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { + return p.protocol.ReadFieldEnd(ctx) +} + +func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + return p.protocol.ReadMapBegin(ctx) +} + +func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { + return p.protocol.ReadMapEnd(ctx) +} + +func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadListBegin(ctx) +} + +func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { + return p.protocol.ReadListEnd(ctx) +} + +func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadSetBegin(ctx) +} + +func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { + return p.protocol.ReadSetEnd(ctx) +} + +func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { + return p.protocol.ReadBool(ctx) +} + +func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { + return p.protocol.ReadByte(ctx) +} + +func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { + return 
p.protocol.ReadI16(ctx) +} + +func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { + return p.protocol.ReadI32(ctx) +} + +func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { + return p.protocol.ReadI64(ctx) +} + +func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + return p.protocol.ReadDouble(ctx) +} + +func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { + return p.protocol.ReadString(ctx) +} + +func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + return p.protocol.ReadBinary(ctx) +} + +func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { + return p.protocol.Skip(ctx, fieldType) +} + +// SetTConfiguration implements TConfigurationSetter. +func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + PropagateTConfiguration(p.protocol, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) + _ TConfigurationSetter = (*THeaderProtocol)(nil) +) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go similarity index 80% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go index 5343ccb46..f5736df42 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go @@ -75,6 +75,15 @@ const ( THeaderProtocolDefault = THeaderProtocolBinary ) +// Declared globally to avoid repetitive allocations, not really used. +var globalMemoryBuffer = NewTMemoryBuffer() + +// Validate checks whether the THeaderProtocolID is a valid/supported one. +func (id THeaderProtocolID) Validate() error { + _, err := id.GetProtocol(globalMemoryBuffer) + return err +} + // GetProtocol gets the corresponding TProtocol from the wrapped protocol id. func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { switch id { @@ -84,7 +93,7 @@ func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { fmt.Sprintf("THeader protocol id %d not supported", id), ) case THeaderProtocolBinary: - return NewTBinaryProtocolFactoryDefault().GetProtocol(trans), nil + return NewTBinaryProtocolTransport(trans), nil case THeaderProtocolCompact: return NewTCompactProtocol(trans), nil } @@ -93,11 +102,12 @@ func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { // THeaderTransformID defines the numeric id of the transform used. type THeaderTransformID int32 -// THeaderTransformID values +// THeaderTransformID values. +// +// Values not defined here are not currently supported, namely HMAC and Snappy. const ( TransformNone THeaderTransformID = iota // 0, no special handling TransformZlib // 1, zlib - // Rest of the values are not currently supported, namely HMAC and Snappy. 
) var supportedTransformIDs = map[THeaderTransformID]bool{ @@ -255,6 +265,7 @@ type THeaderTransport struct { clientType clientType protocolID THeaderProtocolID + cfg *TConfiguration // buffer is used in the following scenarios to avoid repetitive // allocations, while 4 is big enough for all those scenarios: @@ -266,22 +277,35 @@ type THeaderTransport struct { var _ TTransport = (*THeaderTransport)(nil) -// NewTHeaderTransport creates THeaderTransport from the underlying transport. +// Deprecated: Use NewTHeaderTransportConf instead. +func NewTHeaderTransport(trans TTransport) *THeaderTransport { + return NewTHeaderTransportConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportConf creates THeaderTransport from the +// underlying transport, with given TConfiguration attached. // -// Please note that THeaderTransport handles framing and zlib by itself, -// so the underlying transport should be the raw socket transports (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. +// If trans is already a *THeaderTransport, it will be returned as is, +// but with TConfiguration overridden by the value passed in. // -// If trans is already a *THeaderTransport, it will be returned as is. -func NewTHeaderTransport(trans TTransport) *THeaderTransport { +// The protocol ID in TConfiguration is only useful for client transports. +// For servers, +// the protocol ID will be overridden again to the one set by the client, +// to ensure that servers always speak the same dialect as the client. +func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { if ht, ok := trans.(*THeaderTransport); ok { + ht.SetTConfiguration(conf) return ht } + PropagateTConfiguration(trans, conf) return &THeaderTransport{ transport: trans, reader: bufio.NewReader(trans), writeHeaders: make(THeaderMap), - protocolID: THeaderProtocolDefault, + protocolID: conf.GetTHeaderProtocolID(), + cfg: conf, } } @@ -297,18 +321,34 @@ func (t *THeaderTransport) IsOpen() bool { // ReadFrame tries to read the frame header, guess the client type, and handle // unframed clients. -func (t *THeaderTransport) ReadFrame() error { +func (t *THeaderTransport) ReadFrame(ctx context.Context) error { if !t.needReadFrame() { // No need to read frame, skipping. return nil } + // Peek and handle the first 32 bits. // They could either be the length field of a framed message, // or the first bytes of an unframed message. - buf, err := t.reader.Peek(size32) + var buf []byte + var err error + // This is also usually the first read from a connection, + // so handle retries around socket timeouts. + _, deadlineSet := ctx.Deadline() + for { + buf, err = t.reader.Peek(size32) + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout and we still have time, + // continue trying + continue + } + // For anything else, do not retry + break + } if err != nil { return err } + frameSize := binary.BigEndian.Uint32(buf) if frameSize&VERSION_MASK == VERSION_1 { t.clientType = clientUnframedBinary @@ -321,7 +361,7 @@ func (t *THeaderTransport) ReadFrame() error { // At this point it should be a framed message, // sanity check on frameSize then discard the peeked part. 
- if frameSize > THeaderMaxFrameSize { + if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { return NewTProtocolExceptionWithType( SIZE_LIMIT, errors.New("frame too large"), @@ -330,10 +370,7 @@ func (t *THeaderTransport) ReadFrame() error { t.reader.Discard(size32) // Read the frame fully into frameBuffer. - _, err = io.Copy( - &t.frameBuffer, - io.LimitReader(t.reader, int64(frameSize)), - ) + _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize)) if err != nil { return err } @@ -344,7 +381,7 @@ func (t *THeaderTransport) ReadFrame() error { version := binary.BigEndian.Uint32(buf) if version&THeaderHeaderMask == THeaderHeaderMagic { t.clientType = clientHeaders - return t.parseHeaders(frameSize) + return t.parseHeaders(ctx, frameSize) } if version&VERSION_MASK == VERSION_1 { t.clientType = clientFramedBinary @@ -374,7 +411,7 @@ func (t *THeaderTransport) endOfFrame() error { return t.frameReader.Close() } -func (t *THeaderTransport) parseHeaders(frameSize uint32) error { +func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { if t.clientType != clientHeaders { return nil } @@ -395,11 +432,12 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error { ) } headerBuf := NewTMemoryBuffer() - _, err = io.Copy(headerBuf, io.LimitReader(&t.frameBuffer, headerLength)) + _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength) if err != nil { return err } hp := NewTCompactProtocol(headerBuf) + hp.SetTConfiguration(t.cfg) // At this point the header is already read into headerBuf, // and t.frameBuffer starts from the actual payload. @@ -408,6 +446,7 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error { return err } t.protocolID = THeaderProtocolID(protoID) + var transformCount int32 transformCount, err = hp.readVarint32() if err != nil { @@ -442,7 +481,7 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error { headers := make(THeaderMap) for { infoType, err := hp.readVarint32() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { @@ -454,11 +493,11 @@ func (t *THeaderTransport) parseHeaders(frameSize uint32) error { return err } for i := 0; i < int(count); i++ { - key, err := hp.ReadString() + key, err := hp.ReadString(ctx) if err != nil { return err } - value, err := hp.ReadString() + value, err := hp.ReadString(ctx) if err != nil { return err } @@ -488,21 +527,37 @@ func (t *THeaderTransport) needReadFrame() bool { } func (t *THeaderTransport) Read(p []byte) (read int, err error) { - err = t.ReadFrame() + // Here using context.Background instead of a context passed in is safe. + // First is that there's no way to pass context into this function. + // Then, 99% of the case when calling this Read frame is already read + // into frameReader. ReadFrame here is more of preventing bugs that + // didn't call ReadFrame before calling Read. + err = t.ReadFrame(context.Background()) if err != nil { return } if t.frameReader != nil { read, err = t.frameReader.Read(p) - if err == io.EOF { + if err == nil && t.frameBuffer.Len() <= 0 { + // the last Read finished the frame, do endOfFrame + // handling here. + err = t.endOfFrame() + } else if err == io.EOF { err = t.endOfFrame() if err != nil { return } - if read < len(p) { - var nextRead int - nextRead, err = t.Read(p[read:]) - read += nextRead + if read == 0 { + // Try to read the next frame when we hit EOF + // (end of frame) immediately. 
+ // When we got here, it means the last read + // finished the previous frame, but didn't + // do endOfFrame handling yet. + // We have to read the next frame here, + // as otherwise we would return 0 and nil, + // which is a case not handled well by most + // protocol implementations. + return t.Read(p) } } return @@ -534,6 +589,7 @@ func (t *THeaderTransport) Flush(ctx context.Context) error { case clientHeaders: headers := NewTMemoryBuffer() hp := NewTCompactProtocol(headers) + hp.SetTConfiguration(t.cfg) if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { return NewTTransportExceptionFromError(err) } @@ -553,10 +609,10 @@ func (t *THeaderTransport) Flush(ctx context.Context) error { return NewTTransportExceptionFromError(err) } for key, value := range t.writeHeaders { - if err := hp.WriteString(key); err != nil { + if err := hp.WriteString(ctx, key); err != nil { return NewTTransportExceptionFromError(err) } - if err := hp.WriteString(value); err != nil { + if err := hp.WriteString(ctx, value); err != nil { return NewTTransportExceptionFromError(err) } } @@ -696,17 +752,37 @@ func (t *THeaderTransport) isFramed() bool { } } +// SetTConfiguration implements TConfigurationSetter. +func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(t.transport, cfg) + t.cfg = cfg +} + // THeaderTransportFactory is a TTransportFactory implementation to create // THeaderTransport. +// +// It also implements TConfigurationSetter. type THeaderTransportFactory struct { // The underlying factory, could be nil. Factory TTransportFactory + + cfg *TConfiguration } -// NewTHeaderTransportFactory creates a new *THeaderTransportFactory. +// Deprecated: Use NewTHeaderTransportFactoryConf instead. func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with +// the given *TConfiguration. +func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { return &THeaderTransportFactory{ Factory: factory, + + cfg: conf, } } @@ -717,7 +793,18 @@ func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, er if err != nil { return nil, err } - return NewTHeaderTransport(t), nil + return NewTHeaderTransportConf(t, f.cfg), nil } - return NewTHeaderTransport(trans), nil + return NewTHeaderTransportConf(trans, f.cfg), nil } + +// SetTConfiguration implements TConfigurationSetter. 
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.Factory, f.cfg) + f.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*THeaderTransportFactory)(nil) + _ TConfigurationSetter = (*THeaderTransport)(nil) +) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go similarity index 88% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go index 5c82bf538..15015864f 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go @@ -22,6 +22,7 @@ package thrift import ( "bytes" "context" + "errors" "io" "io/ioutil" "net/http" @@ -159,26 +160,37 @@ func (p *THttpClient) Read(buf []byte) (int, error) { return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") } n, err := p.response.Body.Read(buf) - if n > 0 && (err == nil || err == io.EOF) { + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { return n, nil } return n, NewTTransportExceptionFromError(err) } func (p *THttpClient) ReadByte() (c byte, err error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } return readByte(p.response.Body) } func (p *THttpClient) Write(buf []byte) (int, error) { - n, err := p.requestBuffer.Write(buf) - return n, err + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.Write(buf) } func (p *THttpClient) WriteByte(c byte) error { + if p.requestBuffer == nil { + return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } return p.requestBuffer.WriteByte(c) } func (p *THttpClient) WriteString(s string) (n int, err error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } return p.requestBuffer.WriteString(s) } @@ -186,7 +198,11 @@ func (p *THttpClient) Flush(ctx context.Context) error { // Close any previous response body to avoid leaking connections. p.closeResponse() - req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer) + // Give up the ownership of the current request buffer to http request, + // and create a new buffer for the next request. + buf := p.requestBuffer + p.requestBuffer = new(bytes.Buffer) + req, err := http.NewRequest("POST", p.url.String(), buf) if err != nil { return NewTTransportExceptionFromError(err) } @@ -218,7 +234,7 @@ func (p *THttpClient) RemainingBytes() (num_bytes uint64) { } const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used + return maxSize // the truth is, we just don't know unless framed is used } // Deprecated: Use NewTHttpClientTransportFactory instead. 
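The change that recurs across every vendored thrift file in this diff is the TConfiguration plumbing: each protocol, transport and factory gains a SetTConfiguration method and forwards the shared *TConfiguration to whatever it wraps via PropagateTConfiguration, so a limit such as MaxFrameSize set once at the outermost layer reaches the layer that actually reads frames. The sketch below illustrates that pattern with a hypothetical wrapper transport; countingTransport and its fields are invented for illustration and assume the code sits in the same thrift package as the vendored files (the package is internal to the Jaeger exporter, so it is not importable from outside), while TTransport, TConfiguration, TConfigurationSetter and PropagateTConfiguration are the identifiers used in the hunks above.

// countingTransport is a hypothetical wrapper, shown only to illustrate the
// propagation pattern added throughout this vendored copy; it is not part of
// the diff.
type countingTransport struct {
	TTransport // wrapped transport, embedded so all TTransport methods are inherited

	bytesRead uint64
}

// Read delegates to the wrapped transport and counts the bytes returned.
func (t *countingTransport) Read(p []byte) (int, error) {
	n, err := t.TTransport.Read(p)
	t.bytesRead += uint64(n)
	return n, err
}

// SetTConfiguration implements TConfigurationSetter by pushing the shared
// configuration down to the wrapped transport, mirroring what TFramedTransport,
// THeaderTransport and TDebugProtocol do in the hunks above.
func (t *countingTransport) SetTConfiguration(conf *TConfiguration) {
	PropagateTConfiguration(t.TTransport, conf)
}

// Compile-time assertion in the same style the vendored files use.
var _ TConfigurationSetter = (*countingTransport)(nil)

Because every layer forwards the same configuration object, wrapping a transport in something like countingTransport does not defeat the MaxFrameSize checks performed lower in the stack, which is the property the NewT*Conf constructors and PropagateTConfiguration calls introduced by this diff are there to preserve.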
diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go similarity index 90% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go index 66f0f388a..bc6922762 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go @@ -24,6 +24,7 @@ import ( "io" "net/http" "strings" + "sync" ) // NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function @@ -40,14 +41,24 @@ func NewThriftHandlerFunc(processor TProcessor, // gz transparently compresses the HTTP response if the client supports it. func gz(handler http.HandlerFunc) http.HandlerFunc { + sp := &sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } + return func(w http.ResponseWriter, r *http.Request) { if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { handler(w, r) return } w.Header().Set("Content-Encoding", "gzip") - gz := gzip.NewWriter(w) - defer gz.Close() + gz := sp.Get().(*gzip.Writer) + gz.Reset(w) + defer func() { + _ = gz.Close() + sp.Put(gz) + }() gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} handler(gzw, r) } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go similarity index 93% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go index fea93bcef..1c477990f 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go @@ -210,5 +210,13 @@ func (p *StreamTransport) WriteString(s string) (n int, err error) { func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used + return maxSize // the truth is, we just don't know unless framed is used } + +// SetTConfiguration implements TConfigurationSetter for propagation. 
+func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.Reader, conf) + PropagateTConfiguration(p.Writer, conf) +} + +var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go similarity index 65% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go index 800ac22c7..8e59d16cf 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go @@ -41,8 +41,8 @@ type TJSONProtocol struct { // Constructor func NewTJSONProtocol(t TTransport) *TJSONProtocol { v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) return v } @@ -57,43 +57,43 @@ func NewTJSONProtocolFactory() *TJSONProtocolFactory { return &TJSONProtocolFactory{} } -func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { +func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { p.resetContextStack() // THRIFT-3735 if e := p.OutputListBegin(); e != nil { return e } - if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { + if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { return e } - if e := p.WriteString(name); e != nil { + if e := p.WriteString(ctx, name); e != nil { return e } - if e := p.WriteByte(int8(typeId)); e != nil { + if e := p.WriteByte(ctx, int8(typeId)); e != nil { return e } - if e := p.WriteI32(seqId); e != nil { + if e := p.WriteI32(ctx, seqId); e != nil { return e } return nil } -func (p *TJSONProtocol) WriteMessageEnd() error { +func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TJSONProtocol) WriteStructBegin(name string) error { +func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { if e := p.OutputObjectBegin(); e != nil { return e } return nil } -func (p *TJSONProtocol) WriteStructEnd() error { +func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { return p.OutputObjectEnd() } -func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteI16(id); e != nil { +func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteI16(ctx, id); e != nil { return e } if e := p.OutputObjectBegin(); e != nil { @@ -103,19 +103,19 @@ func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) err if e1 != nil { return e1 } - if e := p.WriteString(s); e != nil { + if e := p.WriteString(ctx, s); e != nil { return e } return nil } -func (p *TJSONProtocol) WriteFieldEnd() error { +func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { return p.OutputObjectEnd() } -func (p *TJSONProtocol) WriteFieldStop() error { return nil } +func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error 
{ return nil } -func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { +func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { if e := p.OutputListBegin(); e != nil { return e } @@ -123,77 +123,77 @@ func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) if e1 != nil { return e1 } - if e := p.WriteString(s); e != nil { + if e := p.WriteString(ctx, s); e != nil { return e } s, e1 = p.TypeIdToString(valueType) if e1 != nil { return e1 } - if e := p.WriteString(s); e != nil { + if e := p.WriteString(ctx, s); e != nil { return e } - if e := p.WriteI64(int64(size)); e != nil { + if e := p.WriteI64(ctx, int64(size)); e != nil { return e } return p.OutputObjectBegin() } -func (p *TJSONProtocol) WriteMapEnd() error { +func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { if e := p.OutputObjectEnd(); e != nil { return e } return p.OutputListEnd() } -func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error { +func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { return p.OutputElemListBegin(elemType, size) } -func (p *TJSONProtocol) WriteListEnd() error { +func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error { +func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { return p.OutputElemListBegin(elemType, size) } -func (p *TJSONProtocol) WriteSetEnd() error { +func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TJSONProtocol) WriteBool(b bool) error { +func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { if b { - return p.WriteI32(1) + return p.WriteI32(ctx, 1) } - return p.WriteI32(0) + return p.WriteI32(ctx, 0) } -func (p *TJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) +func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) } -func (p *TJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) +func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, int32(v)) } -func (p *TJSONProtocol) WriteI32(v int32) error { +func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { return p.OutputI64(int64(v)) } -func (p *TJSONProtocol) WriteI64(v int64) error { +func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { return p.OutputI64(int64(v)) } -func (p *TJSONProtocol) WriteDouble(v float64) error { +func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { return p.OutputF64(v) } -func (p *TJSONProtocol) WriteString(v string) error { +func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { return p.OutputString(v) } -func (p *TJSONProtocol) WriteBinary(v []byte) error { +func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { // JSON library only takes in a string, // not an arbitrary byte array, to ensure bytes are transmitted // efficiently we must convert this into a valid JSON string @@ -219,12 +219,12 @@ func (p *TJSONProtocol) WriteBinary(v []byte) error { } // Reading methods. 
-func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { +func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { p.resetContextStack() // THRIFT-3735 if isNull, err := p.ParseListBegin(); isNull || err != nil { return name, typeId, seqId, err } - version, err := p.ReadI32() + version, err := p.ReadI32(ctx) if err != nil { return name, typeId, seqId, err } @@ -233,47 +233,47 @@ func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, se return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) } - if name, err = p.ReadString(); err != nil { + if name, err = p.ReadString(ctx); err != nil { return name, typeId, seqId, err } - bTypeId, err := p.ReadByte() + bTypeId, err := p.ReadByte(ctx) typeId = TMessageType(bTypeId) if err != nil { return name, typeId, seqId, err } - if seqId, err = p.ReadI32(); err != nil { + if seqId, err = p.ReadI32(ctx); err != nil { return name, typeId, seqId, err } return name, typeId, seqId, nil } -func (p *TJSONProtocol) ReadMessageEnd() error { +func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { err := p.ParseListEnd() return err } -func (p *TJSONProtocol) ReadStructBegin() (name string, err error) { +func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { _, err = p.ParseObjectStart() return "", err } -func (p *TJSONProtocol) ReadStructEnd() error { +func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { return p.ParseObjectEnd() } -func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { +func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { b, _ := p.reader.Peek(1) if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { return "", STOP, -1, nil } - fieldId, err := p.ReadI16() + fieldId, err := p.ReadI16(ctx) if err != nil { return "", STOP, fieldId, err } if _, err = p.ParseObjectStart(); err != nil { return "", STOP, fieldId, err } - sType, err := p.ReadString() + sType, err := p.ReadString(ctx) if err != nil { return "", STOP, fieldId, err } @@ -281,17 +281,17 @@ func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { return "", fType, fieldId, err } -func (p *TJSONProtocol) ReadFieldEnd() error { +func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { return p.ParseObjectEnd() } -func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { +func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, VOID, 0, e } // read keyType - sKeyType, e := p.ReadString() + sKeyType, e := p.ReadString(ctx) if e != nil { return keyType, valueType, size, e } @@ -301,7 +301,7 @@ func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int } // read valueType - sValueType, e := p.ReadString() + sValueType, e := p.ReadString(ctx) if e != nil { return keyType, valueType, size, e } @@ -311,7 +311,7 @@ func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int } // read size - iSize, e := p.ReadI64() + iSize, e := p.ReadI64(ctx) if e != nil { return keyType, valueType, size, e } @@ -321,7 +321,7 @@ func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int return keyType, valueType, size, e } -func (p *TJSONProtocol) ReadMapEnd() error { +func (p 
*TJSONProtocol) ReadMapEnd(ctx context.Context) error { e := p.ParseObjectEnd() if e != nil { return e @@ -329,53 +329,53 @@ func (p *TJSONProtocol) ReadMapEnd() error { return p.ParseListEnd() } -func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { +func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { return p.ParseElemListBegin() } -func (p *TJSONProtocol) ReadListEnd() error { +func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { +func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { return p.ParseElemListBegin() } -func (p *TJSONProtocol) ReadSetEnd() error { +func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TJSONProtocol) ReadBool() (bool, error) { - value, err := p.ReadI32() +func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { + value, err := p.ReadI32(ctx) return (value != 0), err } -func (p *TJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() +func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) return int8(v), err } -func (p *TJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() +func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) return int16(v), err } -func (p *TJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() +func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) return int32(v), err } -func (p *TJSONProtocol) ReadI64() (int64, error) { +func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { v, _, err := p.ParseI64() return v, err } -func (p *TJSONProtocol) ReadDouble() (float64, error) { +func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { v, _, err := p.ParseF64() return v, err } -func (p *TJSONProtocol) ReadString() (string, error) { +func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { var v string if err := p.ParsePreValue(); err != nil { return v, err @@ -405,7 +405,7 @@ func (p *TJSONProtocol) ReadString() (string, error) { return v, p.ParsePostValue() } -func (p *TJSONProtocol) ReadBinary() ([]byte, error) { +func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { var v []byte if err := p.ParsePreValue(); err != nil { return nil, err @@ -444,8 +444,8 @@ func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { return NewTProtocolException(err) } -func (p *TJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) +func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) } func (p *TJSONProtocol) Transport() TTransport { @@ -460,10 +460,10 @@ func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { if e1 != nil { return e1 } - if e := p.WriteString(s); e != nil { + if e := p.OutputString(s); e != nil { return e } - if e := p.WriteI64(int64(size)); e != nil { + if e := p.OutputI64(int64(size)); e != nil { return e } return nil @@ -473,7 +473,11 @@ func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, 0, e } - sElemType, err := p.ReadString() + // We don't really use the ctx in ReadString implementation, + // so 
this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. + sElemType, err := p.ReadString(context.Background()) if err != nil { return VOID, size, err } @@ -481,7 +485,7 @@ func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) if err != nil { return elemType, size, err } - nSize, err2 := p.ReadI64() + nSize, _, err2 := p.ParseI64() size = int(nSize) return elemType, size, err2 } @@ -490,7 +494,11 @@ func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, 0, e } - sElemType, err := p.ReadString() + // We don't really use the ctx in ReadString implementation, + // so this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. + sElemType, err := p.ReadString(context.Background()) if err != nil { return VOID, size, err } @@ -498,7 +506,7 @@ func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) if err != nil { return elemType, size, err } - nSize, err2 := p.ReadI64() + nSize, _, err2 := p.ParseI64() size = int(nSize) return elemType, size, err2 } @@ -579,3 +587,5 @@ func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { e := fmt.Errorf("Unknown type identifier: %s", fieldType) return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) } + +var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go new file mode 100644 index 000000000..c42aac998 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "log" + "os" + "testing" +) + +// Logger is a simple wrapper of a logging function. +// +// In reality the users might actually use different logging libraries, and they +// are not always compatible with each other. +// +// Logger is meant to be a simple common ground that it's easy to wrap whatever +// logging library they use into. +// +// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design +// discussion behind it. +type Logger func(msg string) + +// NopLogger is a Logger implementation that does nothing. +func NopLogger(msg string) {} + +// StdLogger wraps stdlib log package into a Logger. 
+// +// If logger passed in is nil, it will fallback to use stderr and default flags. +func StdLogger(logger *log.Logger) Logger { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + return func(msg string) { + logger.Print(msg) + } +} + +// TestLogger is a Logger implementation can be used in test codes. +// +// It fails the test when being called. +func TestLogger(tb testing.TB) Logger { + return func(msg string) { + tb.Errorf("logger called with msg: %q", msg) + } +} + +func fallbackLogger(logger Logger) Logger { + if logger == nil { + return StdLogger(nil) + } + return logger +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go new file mode 100644 index 000000000..8a788df02 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the +// TProcessorFunctions for that TProcessor. +// +// Middlewares are passed in the name of the function as set in the processor +// map of the TProcessor. +type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction + +// WrapProcessor takes an existing TProcessor and wraps each of its inner +// TProcessorFunctions with the middlewares passed in and returns it. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. 
Middlewares[n] +func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { + for name, processorFunc := range processor.ProcessorMap() { + wrapped := processorFunc + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + wrapped = middlewares[i](name, wrapped) + } + processor.AddToProcessorMap(name, wrapped) + } + return processor +} + +// WrappedTProcessorFunction is a convenience struct that implements the +// TProcessorFunction interface that can be used when implementing custom +// Middleware. +type WrappedTProcessorFunction struct { + // Wrapped is called by WrappedTProcessorFunction.Process and should be a + // "wrapped" call to a base TProcessorFunc.Process call. + Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// Process implements the TProcessorFunction interface using p.Wrapped. +func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { + return p.Wrapped(ctx, seqID, in, out) +} + +// verify that WrappedTProcessorFunction implements TProcessorFunction +var ( + _ TProcessorFunction = WrappedTProcessorFunction{} + _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) +) + +// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls +// with custom middleware. +type ClientMiddleware func(TClient) TClient + +// WrappedTClient is a convenience struct that implements the TClient interface +// using inner Wrapped function. +// +// This is provided to aid in developing ClientMiddleware. +type WrappedTClient struct { + Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +// Call implements the TClient interface by calling and returning c.Wrapped. +func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + return c.Wrapped(ctx, method, args, result) +} + +// verify that WrappedTClient implements TClient +var ( + _ TClient = WrappedTClient{} + _ TClient = (*WrappedTClient)(nil) +) + +// WrapClient wraps the given TClient in the given middlewares. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. Middlewares[n] +func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { + // Add middlewares in reverse so the first in the list is the outermost. 
+ for i := len(middlewares) - 1; i >= 0; i-- { + client = middlewares[i](client) + } + return client +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go similarity index 54% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go index d028a30b3..cacbf6bef 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go @@ -68,11 +68,11 @@ func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplex } } -func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { +func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { if typeId == CALL || typeId == ONEWAY { - return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) + return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) } else { - return t.TProtocol.WriteMessageBegin(name, typeId, seqid) + return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) } } @@ -117,6 +117,67 @@ func NewTMultiplexedProcessor() *TMultiplexedProcessor { } } +// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" +// to TProcessorFunction for any registered processors. If there is also a +// DefaultProcessor, the keys for the methods on that processor will simply be +// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and +// other registered processors, then the keys will be a mix of both formats. +// +// The implementation differs with other TProcessors in that the map returned is +// a new map, while most TProcessors just return their internal mapping directly. +// This means that edits to the map returned by this implementation of ProcessorMap +// will not affect the underlying mapping within the TMultiplexedProcessor. +func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { + processorFuncMap := make(map[string]TProcessorFunction) + for name, processor := range t.serviceProcessorMap { + for method, processorFunc := range processor.ProcessorMap() { + processorFuncName := name + MULTIPLEXED_SEPARATOR + method + processorFuncMap[processorFuncName] = processorFunc + } + } + if t.DefaultProcessor != nil { + for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { + processorFuncMap[method] = processorFunc + } + } + return processorFuncMap +} + +// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on +// the format of "name". +// +// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", +// then it sets the given TProcessorFunction on the inner TProcessor with the +// ProcessorName component using the FunctionName component. 
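As an illustration of the middleware helpers added in middleware.go above (a sketch only, not part of the patch, again assuming the upstream github.com/apache/thrift/lib/go/thrift import path), a logging ClientMiddleware can be built from a WrappedTClient, and WrapClient applies middlewares so that the first one in the list ends up outermost:

package thriftexample

import (
	"context"
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

// loggingMiddleware logs every outgoing call before delegating to next.
func loggingMiddleware(next thrift.TClient) thrift.TClient {
	return thrift.WrappedTClient{
		Wrapped: func(ctx context.Context, method string, args, result thrift.TStruct) (thrift.ResponseMeta, error) {
			log.Printf("thrift call: %s", method)
			return next.Call(ctx, method, args, result)
		},
	}
}

// wrapClient shows the ordering documented above: middlewares[0] is outermost.
func wrapClient(base thrift.TClient) thrift.TClient {
	return thrift.WrapClient(base, loggingMiddleware)
}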
+// +// If "name" is just in the format "{FunctionName}", that is to say there is no +// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor +// configured, then it will set the given TProcessorFunction on the DefaultProcessor +// using the given name. +// +// If there is not a TProcessor available for the given name, then this function +// does nothing. This can happen when there is no TProcessor registered for +// the given ProcessorName or if all that is given is the FunctionName and there +// is no DefaultProcessor set. +func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) { + components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(components) != 2 { + if t.DefaultProcessor != nil && len(components) == 1 { + t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc) + } + return + } + processorName := components[0] + funcName := components[1] + if processor, ok := t.serviceProcessorMap[processorName]; ok { + processor.AddToProcessorMap(funcName, processorFunc) + } + +} + +// verify that TMultiplexedProcessor implements TProcessor +var _ TProcessor = (*TMultiplexedProcessor)(nil) + func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { t.DefaultProcessor = processor } @@ -129,12 +190,12 @@ func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProces } func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin() + name, typeId, seqid, err := in.ReadMessageBegin(ctx) if err != nil { - return false, err + return false, NewTProtocolException(err) } if typeId != CALL && typeId != ONEWAY { - return false, fmt.Errorf("Unexpected message type %v", typeId) + return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId)) } //extract the service name v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) @@ -143,11 +204,17 @@ func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) smb := NewStoredMessageProtocol(in, name, typeId, seqid) return t.DefaultProcessor.Process(ctx, smb, out) } - return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) + return false, NewTProtocolException(fmt.Errorf( + "Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", + name, + )) } actualProcessor, ok := t.serviceProcessorMap[v[0]] if !ok { - return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) + return false, NewTProtocolException(fmt.Errorf( + "Service name not found: %s. 
Did you forget to call registerProcessor()?", + v[0], + )) } smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) return actualProcessor.Process(ctx, smb, out) @@ -165,6 +232,6 @@ func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageTy return &storedMessageProtocol{protocol, name, typeId, seqid} } -func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { +func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { return s.name, s.typeId, s.seqid, nil } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go similarity index 97% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go index aa8daa9b5..e4512d204 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go @@ -69,14 +69,14 @@ func NewNumericFromDouble(dValue float64) Numeric { func NewNumericFromI64(iValue int64) Numeric { dValue := float64(iValue) - sValue := string(iValue) + sValue := strconv.FormatInt(iValue, 10) isNil := false return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} } func NewNumericFromI32(iValue int32) Numeric { dValue := float64(iValue) - sValue := string(iValue) + sValue := strconv.FormatInt(int64(iValue), 10) isNil := false return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go similarity index 84% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go index e4b132b30..245a3ccfc 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go @@ -25,6 +25,16 @@ import "context" // writes to some output stream. type TProcessor interface { Process(ctx context.Context, in, out TProtocol) (bool, TException) + + // ProcessorMap returns a map of thrift method names to TProcessorFunctions. + ProcessorMap() map[string]TProcessorFunction + + // AddToProcessorMap adds the given TProcessorFunction to the internal + // processor map at the given key. + // + // If one is already set at the given key, it will be replaced with the new + // TProcessorFunction. 
+ AddToProcessorMap(string, TProcessorFunction) } type TProcessorFunction interface { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go new file mode 100644 index 000000000..0a69bd416 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" + "fmt" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error + WriteMessageEnd(ctx context.Context) error + WriteStructBegin(ctx context.Context, name string) error + WriteStructEnd(ctx context.Context) error + WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error + WriteFieldEnd(ctx context.Context) error + WriteFieldStop(ctx context.Context) error + WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error + WriteMapEnd(ctx context.Context) error + WriteListBegin(ctx context.Context, elemType TType, size int) error + WriteListEnd(ctx context.Context) error + WriteSetBegin(ctx context.Context, elemType TType, size int) error + WriteSetEnd(ctx context.Context) error + WriteBool(ctx context.Context, value bool) error + WriteByte(ctx context.Context, value int8) error + WriteI16(ctx context.Context, value int16) error + WriteI32(ctx context.Context, value int32) error + WriteI64(ctx context.Context, value int64) error + WriteDouble(ctx context.Context, value float64) error + WriteString(ctx context.Context, value string) error + WriteBinary(ctx context.Context, value []byte) error + + ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd(ctx context.Context) error + ReadStructBegin(ctx context.Context) (name string, err error) + ReadStructEnd(ctx context.Context) error + ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) + ReadFieldEnd(ctx context.Context) error + ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) + ReadMapEnd(ctx context.Context) error + ReadListBegin(ctx context.Context) (elemType TType, size int, err error) + ReadListEnd(ctx context.Context) error + ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) + ReadSetEnd(ctx context.Context) error + ReadBool(ctx context.Context) (value bool, err error) + ReadByte(ctx context.Context) (value int8, err error) + 
ReadI16(ctx context.Context) (value int16, err error) + ReadI32(ctx context.Context) (value int32, err error) + ReadI64(ctx context.Context) (value int64, err error) + ReadDouble(ctx context.Context) (value float64, err error) + ReadString(ctx context.Context) (value string, err error) + ReadBinary(ctx context.Context) (value []byte, err error) + + Skip(ctx context.Context, fieldType TType) (err error) + Flush(ctx context.Context) (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. +func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { + return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) +} + +// Skips over the next data element from the provided input TProtocol object. +func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { + + if maxDepth <= 0 { + return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) + } + + switch fieldType { + case BOOL: + _, err = self.ReadBool(ctx) + return + case BYTE: + _, err = self.ReadByte(ctx) + return + case I16: + _, err = self.ReadI16(ctx) + return + case I32: + _, err = self.ReadI32(ctx) + return + case I64: + _, err = self.ReadI64(ctx) + return + case DOUBLE: + _, err = self.ReadDouble(ctx) + return + case STRING: + _, err = self.ReadString(ctx) + return + case STRUCT: + if _, err = self.ReadStructBegin(ctx); err != nil { + return err + } + for { + _, typeId, _, _ := self.ReadFieldBegin(ctx) + if typeId == STOP { + break + } + err := Skip(ctx, self, typeId, maxDepth-1) + if err != nil { + return err + } + self.ReadFieldEnd(ctx) + } + return self.ReadStructEnd(ctx) + case MAP: + keyType, valueType, size, err := self.ReadMapBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, keyType, maxDepth-1) + if err != nil { + return err + } + self.Skip(ctx, valueType) + } + return self.ReadMapEnd(ctx) + case SET: + elemType, size, err := self.ReadSetBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadSetEnd(ctx) + case LIST: + elemType, size, err := self.ReadListBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadListEnd(ctx) + default: + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) + } + return nil +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go similarity index 68% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go index 29ab75d92..9dcf4bfd9 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go @@ -21,6 +21,7 @@ package thrift import ( "encoding/base64" + "errors" ) // Thrift Protocol exception @@ -40,8 +41,15 @@ 
const ( ) type tProtocolException struct { - typeId int - message string + typeId int + err error + msg string +} + +var _ TProtocolException = (*tProtocolException)(nil) + +func (tProtocolException) TExceptionType() TExceptionType { + return TExceptionTypeProtocol } func (p *tProtocolException) TypeId() int { @@ -49,29 +57,48 @@ func (p *tProtocolException) TypeId() int { } func (p *tProtocolException) String() string { - return p.message + return p.msg } func (p *tProtocolException) Error() string { - return p.message + return p.msg +} + +func (p *tProtocolException) Unwrap() error { + return p.err } func NewTProtocolException(err error) TProtocolException { if err == nil { return nil } + if e, ok := err.(TProtocolException); ok { return e } - if _, ok := err.(base64.CorruptInputError); ok { - return &tProtocolException{INVALID_DATA, err.Error()} + + if errors.As(err, new(base64.CorruptInputError)) { + return NewTProtocolExceptionWithType(INVALID_DATA, err) } - return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} + + return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) } func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { if err == nil { return nil } - return &tProtocolException{errType, err.Error()} + return &tProtocolException{ + typeId: errType, + err: err, + msg: err.Error(), + } +} + +func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { + return &tProtocolException{ + typeId: err.TypeId(), + err: err, + msg: prepend + err.Error(), + } } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go new file mode 100644 index 000000000..d884c6ac6 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. 
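For the response helpers defined just below, a hedged usage sketch (upstream import path assumed rather than the internal vendored one): a handler retrieves the helper from its context and sets a THeader response header; with a non-THeader protocol, or with the zero-value helper, the call is simply a no-op.

package thriftexample

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

// annotateResponse sets a response header if the server injected a helper
// into ctx; otherwise the zero-value helper makes this a safe no-op.
func annotateResponse(ctx context.Context) {
	helper, _ := thrift.GetResponseHelper(ctx)
	helper.SetHeader("request-id", "example-123")
}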
+type responseHelperKey struct{} + +// TResponseHelper defines a object with a set of helper functions that can be +// retrieved from the context object passed into server handler functions. +// +// Use GetResponseHelper to retrieve the injected TResponseHelper implementation +// from the context object. +// +// The zero value of TResponseHelper is valid with all helper functions being +// no-op. +type TResponseHelper struct { + // THeader related functions + *THeaderResponseHelper +} + +// THeaderResponseHelper defines THeader related TResponseHelper functions. +// +// The zero value of *THeaderResponseHelper is valid with all helper functions +// being no-op. +type THeaderResponseHelper struct { + proto *THeaderProtocol +} + +// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the +// underlying TProtocol. +func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { + if hp, ok := proto.(*THeaderProtocol); ok { + return &THeaderResponseHelper{ + proto: hp, + } + } + return nil +} + +// SetHeader sets a response header. +// +// It's no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) SetHeader(key, value string) { + if h != nil && h.proto != nil { + h.proto.SetWriteHeader(key, value) + } +} + +// ClearHeaders clears all the response headers previously set. +// +// It's no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) ClearHeaders() { + if h != nil && h.proto != nil { + h.proto.ClearWriteHeaders() + } +} + +// GetResponseHelper retrieves the TResponseHelper implementation injected into +// the context object. +// +// If no helper was found in the context object, a nop helper with ok == false +// will be returned. +func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { + if v := ctx.Value(responseHelperKey{}); v != nil { + helper, ok = v.(TResponseHelper) + } + return +} + +// SetResponseHelper injects TResponseHelper into the context object. 
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { + return context.WithValue(ctx, responseHelperKey{}, helper) +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go similarity index 95% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go index 4025bebea..83fdf29f5 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go @@ -19,7 +19,10 @@ package thrift -import "io" +import ( + "errors" + "io" +) type RichTransport struct { TTransport @@ -49,7 +52,7 @@ func (r *RichTransport) RemainingBytes() (num_bytes uint64) { func readByte(r io.Reader) (c byte, err error) { v := [1]byte{0} n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || err == io.EOF) { + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { return v[0], nil } if n > 0 && err != nil { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go new file mode 100644 index 000000000..c44979094 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "sync" +) + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(ctx context.Context, p TProtocol) error + Read(ctx context.Context, p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { + t.Transport.Reset() + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + if err = t.Transport.Flush(ctx); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { + t.Transport.Reset() + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + + if err = t.Transport.Flush(ctx); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) + return +} + +// TSerializerPool is the thread-safe version of TSerializer, it uses resource +// pool of TSerializer under the hood. +// +// It must be initialized with either NewTSerializerPool or +// NewTSerializerPoolSizeFactory. +type TSerializerPool struct { + pool sync.Pool +} + +// NewTSerializerPool creates a new TSerializerPool. +// +// NewTSerializer can be used as the arg here. +func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { + return &TSerializerPool{ + pool: sync.Pool{ + New: func() interface{} { + return f() + }, + }, + } +} + +// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given +// size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size. 
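A short sketch of the TSerializerPool described above (illustrative only, upstream import path assumed): the pool can be shared across goroutines, with each Write borrowing a TSerializer from the underlying sync.Pool and returning it afterwards.

package thriftexample

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

// serializerPool is safe for concurrent use, unlike a bare *TSerializer.
var serializerPool = thrift.NewTSerializerPool(thrift.NewTSerializer)

// encode serializes any generated TStruct using the pooled serializer.
func encode(ctx context.Context, msg thrift.TStruct) ([]byte, error) {
	return serializerPool.Write(ctx, msg)
}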
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { + return &TSerializerPool{ + pool: sync.Pool{ + New: func() interface{} { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } + }, + }, + } +} + +func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { + s := t.pool.Get().(*TSerializer) + defer t.pool.Put(s) + return s.WriteString(ctx, msg) +} + +func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { + s := t.pool.Get().(*TSerializer) + defer t.pool.Put(s) + return s.Write(ctx, msg) +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/server.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go similarity index 79% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go index f5e0c05d1..d1a815453 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go @@ -25,6 +25,7 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "math" @@ -34,12 +35,13 @@ import ( type _ParseContext int const ( - _CONTEXT_IN_TOPLEVEL _ParseContext = 1 - _CONTEXT_IN_LIST_FIRST _ParseContext = 2 - _CONTEXT_IN_LIST _ParseContext = 3 - _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 - _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 + _CONTEXT_INVALID _ParseContext = iota + _CONTEXT_IN_TOPLEVEL // 1 + _CONTEXT_IN_LIST_FIRST // 2 + _CONTEXT_IN_LIST // 3 + _CONTEXT_IN_OBJECT_FIRST // 4 + _CONTEXT_IN_OBJECT_NEXT_KEY // 5 + _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 ) func (p _ParseContext) String() string { @@ -60,6 +62,32 @@ func (p _ParseContext) String() string { return "UNKNOWN-PARSE-CONTEXT" } 
+type jsonContextStack []_ParseContext + +func (s *jsonContextStack) push(v _ParseContext) { + *s = append(*s, v) +} + +func (s jsonContextStack) peek() (v _ParseContext, ok bool) { + l := len(s) + if l <= 0 { + return + } + return s[l-1], true +} + +func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { + l := len(*s) + if l <= 0 { + return + } + v = (*s)[l-1] + *s = (*s)[0 : l-1] + return v, true +} + +var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) + // Simple JSON protocol implementation for thrift. // // This protocol produces/consumes a simple output format @@ -69,8 +97,8 @@ func (p _ParseContext) String() string { type TSimpleJSONProtocol struct { trans TTransport - parseContextStack []int - dumpContext []int + parseContextStack jsonContextStack + dumpContext jsonContextStack writer *bufio.Writer reader *bufio.Reader @@ -82,8 +110,8 @@ func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { writer: bufio.NewWriter(t), reader: bufio.NewReader(t), } - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) return v } @@ -156,114 +184,113 @@ func mismatch(expected, actual string) error { return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) } -func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { +func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { p.resetContextStack() // THRIFT-3735 if e := p.OutputListBegin(); e != nil { return e } - if e := p.WriteString(name); e != nil { + if e := p.WriteString(ctx, name); e != nil { return e } - if e := p.WriteByte(int8(typeId)); e != nil { + if e := p.WriteByte(ctx, int8(typeId)); e != nil { return e } - if e := p.WriteI32(seqId); e != nil { + if e := p.WriteI32(ctx, seqId); e != nil { return e } return nil } -func (p *TSimpleJSONProtocol) WriteMessageEnd() error { +func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { +func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { if e := p.OutputObjectBegin(); e != nil { return e } return nil } -func (p *TSimpleJSONProtocol) WriteStructEnd() error { +func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { return p.OutputObjectEnd() } -func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteString(name); e != nil { +func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteString(ctx, name); e != nil { return e } return nil } -func (p *TSimpleJSONProtocol) WriteFieldEnd() error { - //return p.OutputListEnd() +func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { return nil } -func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } +func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } -func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { +func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { if e := p.OutputListBegin(); e 
!= nil { return e } - if e := p.WriteByte(int8(keyType)); e != nil { + if e := p.WriteByte(ctx, int8(keyType)); e != nil { return e } - if e := p.WriteByte(int8(valueType)); e != nil { + if e := p.WriteByte(ctx, int8(valueType)); e != nil { return e } - return p.WriteI32(int32(size)) + return p.WriteI32(ctx, int32(size)) } -func (p *TSimpleJSONProtocol) WriteMapEnd() error { +func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error { +func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { return p.OutputElemListBegin(elemType, size) } -func (p *TSimpleJSONProtocol) WriteListEnd() error { +func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error { +func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { return p.OutputElemListBegin(elemType, size) } -func (p *TSimpleJSONProtocol) WriteSetEnd() error { +func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteBool(b bool) error { +func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { return p.OutputBool(b) } -func (p *TSimpleJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) +func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) } -func (p *TSimpleJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) +func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, int32(v)) } -func (p *TSimpleJSONProtocol) WriteI32(v int32) error { +func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { return p.OutputI64(int64(v)) } -func (p *TSimpleJSONProtocol) WriteI64(v int64) error { +func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { return p.OutputI64(int64(v)) } -func (p *TSimpleJSONProtocol) WriteDouble(v float64) error { +func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { return p.OutputF64(v) } -func (p *TSimpleJSONProtocol) WriteString(v string) error { +func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { return p.OutputString(v) } -func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { +func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { // JSON library only takes in a string, // not an arbitrary byte array, to ensure bytes are transmitted // efficiently we must convert this into a valid JSON string @@ -289,39 +316,39 @@ func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { } // Reading methods. 
-func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { +func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { p.resetContextStack() // THRIFT-3735 if isNull, err := p.ParseListBegin(); isNull || err != nil { return name, typeId, seqId, err } - if name, err = p.ReadString(); err != nil { + if name, err = p.ReadString(ctx); err != nil { return name, typeId, seqId, err } - bTypeId, err := p.ReadByte() + bTypeId, err := p.ReadByte(ctx) typeId = TMessageType(bTypeId) if err != nil { return name, typeId, seqId, err } - if seqId, err = p.ReadI32(); err != nil { + if seqId, err = p.ReadI32(ctx); err != nil { return name, typeId, seqId, err } return name, typeId, seqId, nil } -func (p *TSimpleJSONProtocol) ReadMessageEnd() error { +func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { +func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { _, err = p.ParseObjectStart() return "", err } -func (p *TSimpleJSONProtocol) ReadStructEnd() error { +func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { return p.ParseObjectEnd() } -func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { +func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { if err := p.ParsePreValue(); err != nil { return "", STOP, 0, err } @@ -340,21 +367,6 @@ func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { return name, STOP, 0, err } return name, STOP, -1, p.ParsePostValue() - /* - if err = p.ParsePostValue(); err != nil { - return name, STOP, 0, err - } - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, STOP, 0, err - } - bType, err := p.ReadByte() - thetype := TType(bType) - if err != nil { - return name, thetype, 0, err - } - id, err := p.ReadI16() - return name, thetype, id, err - */ } e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) @@ -362,57 +374,56 @@ func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { return "", STOP, 0, NewTProtocolException(io.EOF) } -func (p *TSimpleJSONProtocol) ReadFieldEnd() error { +func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { return nil - //return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { +func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, VOID, 0, e } // read keyType - bKeyType, e := p.ReadByte() + bKeyType, e := p.ReadByte(ctx) keyType = TType(bKeyType) if e != nil { return keyType, valueType, size, e } // read valueType - bValueType, e := p.ReadByte() + bValueType, e := p.ReadByte(ctx) valueType = TType(bValueType) if e != nil { return keyType, valueType, size, e } // read size - iSize, err := p.ReadI64() + iSize, err := p.ReadI64(ctx) size = int(iSize) return keyType, valueType, size, err } -func (p *TSimpleJSONProtocol) ReadMapEnd() error { +func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { +func 
(p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { return p.ParseElemListBegin() } -func (p *TSimpleJSONProtocol) ReadListEnd() error { +func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { +func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { return p.ParseElemListBegin() } -func (p *TSimpleJSONProtocol) ReadSetEnd() error { +func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { +func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { var value bool if err := p.ParsePreValue(); err != nil { @@ -467,32 +478,32 @@ func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { return value, p.ParsePostValue() } -func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() +func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) return int8(v), err } -func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() +func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) return int16(v), err } -func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() +func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) return int32(v), err } -func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { +func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { v, _, err := p.ParseI64() return v, err } -func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { +func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { v, _, err := p.ParseF64() return v, err } -func (p *TSimpleJSONProtocol) ReadString() (string, error) { +func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { var v string if err := p.ParsePreValue(); err != nil { return v, err @@ -522,7 +533,7 @@ func (p *TSimpleJSONProtocol) ReadString() (string, error) { return v, p.ParsePostValue() } -func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { +func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { var v []byte if err := p.ParsePreValue(); err != nil { return nil, err @@ -557,8 +568,8 @@ func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { return NewTProtocolException(p.writer.Flush()) } -func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) +func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) } func (p *TSimpleJSONProtocol) Transport() TTransport { @@ -566,41 +577,41 @@ func (p *TSimpleJSONProtocol) Transport() TTransport { } func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } switch cxt { case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: if _, e := p.write(JSON_COMMA); e != nil { return NewTProtocolException(e) } - break case _CONTEXT_IN_OBJECT_NEXT_VALUE: if _, e := p.write(JSON_COLON); e != nil { return NewTProtocolException(e) } - break } return nil } func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt := 
_ParseContext(p.dumpContext[len(p.dumpContext)-1]) + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } switch cxt { case _CONTEXT_IN_LIST_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_LIST) case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) } return nil } @@ -615,10 +626,13 @@ func (p *TSimpleJSONProtocol) OutputBool(value bool) error { } else { v = string(JSON_FALSE) } - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: v = jsonQuote(v) - default: } if e := p.OutputStringData(v); e != nil { return e @@ -648,11 +662,14 @@ func (p *TSimpleJSONProtocol) OutputF64(value float64) error { } else if math.IsInf(value, -1) { v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) } else { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } v = strconv.FormatFloat(value, 'g', -1, 64) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + switch cxt { case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - default: } } if e := p.OutputStringData(v); e != nil { @@ -665,11 +682,14 @@ func (p *TSimpleJSONProtocol) OutputI64(value int64) error { if e := p.OutputPreValue(); e != nil { return e } + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } v := strconv.FormatInt(value, 10) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + switch cxt { case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: v = jsonQuote(v) - default: } if e := p.OutputStringData(v); e != nil { return e @@ -699,7 +719,7 @@ func (p *TSimpleJSONProtocol) OutputObjectBegin() error { if _, e := p.write(JSON_LBRACE); e != nil { return NewTProtocolException(e) } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) + p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) return nil } @@ -707,7 +727,10 @@ func (p *TSimpleJSONProtocol) OutputObjectEnd() error { if _, e := p.write(JSON_RBRACE); e != nil { return NewTProtocolException(e) } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } if e := p.OutputPostValue(); e != nil { return e } @@ -721,7 +744,7 @@ func (p *TSimpleJSONProtocol) OutputListBegin() error { if _, e := p.write(JSON_LBRACKET); e != nil { return NewTProtocolException(e) } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) + p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) return nil } @@ -729,7 +752,10 @@ func (p *TSimpleJSONProtocol) OutputListEnd() error { if 
_, e := p.write(JSON_RBRACKET); e != nil { return NewTProtocolException(e) } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } if e := p.OutputPostValue(); e != nil { return e } @@ -740,10 +766,10 @@ func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) erro if e := p.OutputListBegin(); e != nil { return e } - if e := p.WriteByte(int8(elemType)); e != nil { + if e := p.OutputI64(int64(elemType)); e != nil { return e } - if e := p.WriteI64(int64(size)); e != nil { + if e := p.OutputI64(int64(size)); e != nil { return e } return nil @@ -753,7 +779,10 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { if e := p.readNonSignificantWhitespace(); e != nil { return NewTProtocolException(e) } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } b, _ := p.reader.Peek(1) switch cxt { case _CONTEXT_IN_LIST: @@ -772,7 +801,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { return NewTProtocolExceptionWithType(INVALID_DATA, e) } } - break case _CONTEXT_IN_OBJECT_NEXT_KEY: if len(b) > 0 { switch b[0] { @@ -789,7 +817,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { return NewTProtocolExceptionWithType(INVALID_DATA, e) } } - break case _CONTEXT_IN_OBJECT_NEXT_VALUE: if len(b) > 0 { switch b[0] { @@ -804,7 +831,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { return NewTProtocolExceptionWithType(INVALID_DATA, e) } } - break } return nil } @@ -813,20 +839,20 @@ func (p *TSimpleJSONProtocol) ParsePostValue() error { if e := p.readNonSignificantWhitespace(); e != nil { return NewTProtocolException(e) } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } switch cxt { case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) - break + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_LIST) case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) } return nil } @@ -979,7 +1005,7 @@ func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { } if len(b) > 0 && b[0] == JSON_LBRACE[0] { p.reader.ReadByte() - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) + p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) return false, nil } else if p.safePeekContains(JSON_NULL) { return true, nil @@ -992,7 +1018,7 @@ func (p *TSimpleJSONProtocol) ParseObjectEnd() error { if isNull, err := p.readIfNull(); isNull || err != nil { return err } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, _ := p.parseContextStack.peek() if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { e := fmt.Errorf("Expected to be in the Object 
Context, but not in Object Context (%d)", cxt) return NewTProtocolExceptionWithType(INVALID_DATA, e) @@ -1010,7 +1036,7 @@ func (p *TSimpleJSONProtocol) ParseObjectEnd() error { break } } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack.pop() return p.ParsePostValue() } @@ -1024,7 +1050,7 @@ func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { return false, err } if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) + p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) p.reader.ReadByte() isNull = false } else if p.safePeekContains(JSON_NULL) { @@ -1039,12 +1065,12 @@ func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, 0, e } - bElemType, err := p.ReadByte() + bElemType, _, err := p.ParseI64() elemType = TType(bElemType) if err != nil { return elemType, size, err } - nSize, err2 := p.ReadI64() + nSize, _, err2 := p.ParseI64() size = int(nSize) return elemType, size, err2 } @@ -1053,7 +1079,7 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error { if isNull, err := p.readIfNull(); isNull || err != nil { return err } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, _ := p.parseContextStack.peek() if cxt != _CONTEXT_IN_LIST { e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) return NewTProtocolExceptionWithType(INVALID_DATA, e) @@ -1071,8 +1097,10 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error { break } } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { + p.parseContextStack.pop() + if cxt, ok := p.parseContextStack.peek(); !ok { + return errEmptyJSONContextStack + } else if cxt == _CONTEXT_IN_TOPLEVEL { return nil } return p.ParsePostValue() @@ -1325,8 +1353,8 @@ func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { // Reset the context stack to its initial state. func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} - p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} + p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} + p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} } func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { @@ -1336,3 +1364,10 @@ func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { } return n, err } + +// SetTConfiguration implements TConfigurationSetter for propagation. 
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) +} + +var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go similarity index 77% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go index f8efbed91..563cbfc69 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go @@ -20,12 +20,34 @@ package thrift import ( - "log" - "runtime/debug" + "errors" + "fmt" + "io" "sync" "sync/atomic" + "time" ) +// ErrAbandonRequest is a special error server handler implementations can +// return to indicate that the request has been abandoned. +// +// TSimpleServer will check for this error, and close the client connection +// instead of writing the response/error back to the client. +// +// It shall only be used when the server handler implementation know that the +// client already abandoned the request (by checking that the passed in context +// is already canceled, for example). +var ErrAbandonRequest = errors.New("request abandoned") + +// ServerConnectivityCheckInterval defines the ticker interval used by +// connectivity check in thrift compiled TProcessorFunc implementations. +// +// It's defined as a variable instead of constant, so that thrift server +// implementations can change its value to control the behavior. +// +// If it's changed to <=0, the feature will be disabled. +var ServerConnectivityCheckInterval = time.Millisecond * 5 + /* * This is not a typical TSimpleServer as it is not blocked after accept a socket. * It is more like a TThreadedServer that can handle different connections in different goroutines. @@ -45,6 +67,8 @@ type TSimpleServer struct { // Headers to auto forward in THeaderProtocol forwardHeaders []string + + logger Logger } func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { @@ -148,6 +172,14 @@ func (p *TSimpleServer) SetForwardHeaders(headers []string) { p.forwardHeaders = keys } +// SetLogger sets the logger used by this TSimpleServer. +// +// If no logger was set before Serve is called, a default logger using standard +// log library will be used. +func (p *TSimpleServer) SetLogger(logger Logger) { + p.logger = logger +} + func (p *TSimpleServer) innerAccept() (int32, error) { client, err := p.serverTransport.Accept() p.mu.Lock() @@ -164,7 +196,7 @@ func (p *TSimpleServer) innerAccept() (int32, error) { go func() { defer p.wg.Done() if err := p.processRequests(client); err != nil { - log.Println("error processing request:", err) + p.logger(fmt.Sprintf("error processing request: %v", err)) } }() } @@ -184,6 +216,8 @@ func (p *TSimpleServer) AcceptLoop() error { } func (p *TSimpleServer) Serve() error { + p.logger = fallbackLogger(p.logger) + err := p.Listen() if err != nil { return err @@ -204,7 +238,26 @@ func (p *TSimpleServer) Stop() error { return nil } -func (p *TSimpleServer) processRequests(client TTransport) error { +// If err is actually EOF, return nil, otherwise return err as-is. 
+func treatEOFErrorsAsNil(err error) error { + if err == nil { + return nil + } + if errors.Is(err, io.EOF) { + return nil + } + var te TTransportException + if errors.As(err, &te) && te.TypeId() == END_OF_FILE { + return nil + } + return err +} + +func (p *TSimpleServer) processRequests(client TTransport) (err error) { + defer func() { + err = treatEOFErrorsAsNil(err) + }() + processor := p.processorFactory.GetProcessor(client) inputTransport, err := p.inputTransportFactory.GetTransport(client) if err != nil { @@ -229,12 +282,6 @@ func (p *TSimpleServer) processRequests(client TTransport) error { outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) } - defer func() { - if e := recover(); e != nil { - log.Printf("panic in processor: %s: %s", e, debug.Stack()) - } - }() - if inputTransport != nil { defer inputTransport.Close() } @@ -246,7 +293,12 @@ func (p *TSimpleServer) processRequests(client TTransport) error { return nil } - ctx := defaultCtx + ctx := SetResponseHelper( + defaultCtx, + TResponseHelper{ + THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), + }, + ) if headerProtocol != nil { // We need to call ReadFrame here, otherwise we won't // get any headers on the AddReadTHeaderToContext call. @@ -254,20 +306,22 @@ func (p *TSimpleServer) processRequests(client TTransport) error { // ReadFrame is safe to be called multiple times so it // won't break when it's called again later when we // actually start to read the message. - if err := headerProtocol.ReadFrame(); err != nil { + if err := headerProtocol.ReadFrame(ctx); err != nil { return err } - ctx = AddReadTHeaderToContext(defaultCtx, headerProtocol.GetReadHeaders()) + ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) ctx = SetWriteHeaderList(ctx, p.forwardHeaders) } ok, err := processor.Process(ctx, inputProtocol, outputProtocol) - if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE { - return nil - } else if err != nil { + if errors.Is(err, ErrAbandonRequest) { + return client.Close() + } + if errors.As(err, new(TTransportException)) && err != nil { return err } - if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD { + var tae TApplicationException + if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { continue } if !ok { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go new file mode 100644 index 000000000..e911bf166 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "net" + "time" +) + +type TSocket struct { + conn *socketConn + addr net.Addr + cfg *TConfiguration + + connectTimeout time.Duration + socketTimeout time.Duration +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocket(hostPort string) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", hostPort) + if err != nil { + return nil, err + } + return NewTSocketFromAddrConf(addr, conf), nil +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromAddrConf creates a TSocket from a net.Addr +func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { + return &TSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromAddrConf instead. +func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { + return NewTSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. +func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { + return &TSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromConnConf instead. +func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { + return NewTSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to set connect and socket timeouts. +func (p *TSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSocket) Open() error { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + var err error + if p.conn, err = createSocketConnFromReturn(net.DialTimeout( + p.addr.Network(), + p.addr.String(), + p.cfg.GetConnectTimeout(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +//Returns the remote address of the socket. +func (p *TSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go new file mode 100644 index 000000000..c1cc30c6c --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "net" +) + +// socketConn is a wrapped net.Conn that tries to do connectivity check. +type socketConn struct { + net.Conn + + buffer [1]byte +} + +var _ net.Conn = (*socketConn)(nil) + +// createSocketConnFromReturn is a language sugar to help create socketConn from +// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. +func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { + if err != nil { + return nil, err + } + return &socketConn{ + Conn: conn, + }, nil +} + +// wrapSocketConn wraps an existing net.Conn into *socketConn. +func wrapSocketConn(conn net.Conn) *socketConn { + // In case conn is already wrapped, + // return it as-is and avoid double wrapping. + if sc, ok := conn.(*socketConn); ok { + return sc + } + + return &socketConn{ + Conn: conn, + } +} + +// isValid checks whether there's a valid connection. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// It's the same as the previous implementation of TSocket.IsOpen and +// TSSLSocket.IsOpen before we added connectivity check. +func (sc *socketConn) isValid() bool { + return sc != nil && sc.Conn != nil +} + +// IsOpen checks whether the connection is open. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// Otherwise, it tries to do a connectivity check and returns the result. +// +// It also has the side effect of resetting the previously set read deadline on +// the socket. As a result, it shouldn't be called between setting read deadline +// and doing actual read. +func (sc *socketConn) IsOpen() bool { + if !sc.isValid() { + return false + } + return sc.checkConn() == nil +} + +// Read implements io.Reader. +// +// On Windows, it behaves the same as the underlying net.Conn.Read. +// +// On non-Windows, it treats len(p) == 0 as a connectivity check instead of +// readability check, which means instead of blocking until there's something to +// read (readability check), or always return (0, nil) (the default behavior of +// go's stdlib implementation on non-Windows), it never blocks, and will return +// an error if the connection is lost. +func (sc *socketConn) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, sc.read0() + } + + return sc.Conn.Read(p) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go new file mode 100644 index 000000000..f5fab3ab6 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go @@ -0,0 +1,83 @@ +// +build !windows + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" + "syscall" + "time" +) + +// We rely on this variable to be the zero time, +// but define it as global variable to avoid repetitive allocations. +// Please DO NOT mutate this variable in any way. +var zeroTime time.Time + +func (sc *socketConn) read0() error { + return sc.checkConn() +} + +func (sc *socketConn) checkConn() error { + syscallConn, ok := sc.Conn.(syscall.Conn) + if !ok { + // No way to check, return nil + return nil + } + + // The reading about to be done here is non-blocking so we don't really + // need a read deadline. We just need to clear the previously set read + // deadline, if any. + sc.Conn.SetReadDeadline(zeroTime) + + rc, err := syscallConn.SyscallConn() + if err != nil { + return err + } + + var n int + + if readErr := rc.Read(func(fd uintptr) bool { + n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) + return true + }); readErr != nil { + return readErr + } + + if n > 0 { + // We got something, which means we are good + return nil + } + + if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { + // This means the connection is still open but we don't have + // anything to read right now. + return nil + } + + if err != nil { + return err + } + + // At this point, it means the other side already closed the connection. + return io.EOF +} diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go similarity index 56% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/exception.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go index ea8d6f661..679838c3b 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/exception.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go @@ -1,3 +1,5 @@ +// +build windows + /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -19,26 +21,14 @@ package thrift -import ( - "errors" -) - -// Generic Thrift exception -type TException interface { - error +func (sc *socketConn) read0() error { + // On windows, we fallback to the default behavior of reading 0 bytes. 
+ var p []byte + _, err := sc.Conn.Read(p) + return err } -// Prepends additional information to an error without losing the Thrift exception interface -func PrependError(prepend string, err error) error { - if t, ok := err.(TTransportException); ok { - return NewTTransportException(t.TypeId(), prepend+t.Error()) - } - if t, ok := err.(TProtocolException); ok { - return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) - } - if t, ok := err.(TApplicationException); ok { - return NewTApplicationException(t.TypeId(), prepend+t.Error()) - } - - return errors.New(prepend + err.Error()) +func (sc *socketConn) checkConn() error { + // On windows, we always return nil for this check. + return nil } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go new file mode 100644 index 000000000..6359a74ce --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "crypto/tls" + "net" + "time" +) + +type TSSLSocket struct { + conn *socketConn + // hostPort contains host:port (e.g. "asdf.com:12345"). The field is + // only valid if addr is nil. + hostPort string + // addr is nil when hostPort is not "", and is only used when the + // TSSLSocket is constructed from a net.Addr. + addr net.Addr + + cfg *TConfiguration +} + +// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) { + if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + return &TSSLSocket{ + hostPort: hostPort, + cfg: conf, + }, nil +} + +// Deprecated: Use NewTSSLSocketConf instead. 
+func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. +func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromAddrConf instead. +func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. +func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromConnConf instead. +func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to change connect and socket timeouts. +func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSSLSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSSLSocket) Open() error { + var err error + // If we have a hostname, we need to pass the hostname to tls.Dial for + // certificate hostname checks. 
+ if p.hostPort != "" { + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + "tcp", + p.hostPort, + p.cfg.GetTLSConfig(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } else { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + p.addr.Network(), + p.addr.String(), + p.cfg.GetTLSConfig(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSSLSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSSLSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSSLSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +func (p *TSSLSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. 
+ n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSSLSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSSLSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSSLSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSSLSocket)(nil) diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go similarity index 59% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go index 9505b4461..0a3f07646 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go @@ -46,6 +46,13 @@ const ( type tTransportException struct { typeId int err error + msg string +} + +var _ TTransportException = (*tTransportException)(nil) + +func (tTransportException) TExceptionType() TExceptionType { + return TExceptionTypeTransport } func (p *tTransportException) TypeId() int { @@ -53,15 +60,27 @@ func (p *tTransportException) TypeId() int { } func (p *tTransportException) Error() string { - return p.err.Error() + return p.msg } func (p *tTransportException) Err() error { return p.err } +func (p *tTransportException) Unwrap() error { + return p.err +} + +func (p *tTransportException) Timeout() bool { + return p.typeId == TIMED_OUT +} + func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{typeId: t, err: errors.New(e)} + return &tTransportException{ + typeId: t, + err: errors.New(e), + msg: e, + } } func NewTTransportExceptionFromError(e error) TTransportException { @@ -73,18 +92,40 @@ func NewTTransportExceptionFromError(e error) TTransportException { return t } - switch v := e.(type) { - case TTransportException: - return v - case timeoutable: - if v.Timeout() { - return &tTransportException{typeId: TIMED_OUT, err: e} - } + te := &tTransportException{ + typeId: UNKNOWN_TRANSPORT_EXCEPTION, + err: e, + msg: e.Error(), } - if e == io.EOF { - return &tTransportException{typeId: END_OF_FILE, err: e} + if isTimeoutError(e) { + te.typeId = TIMED_OUT + return te } - return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} + if errors.Is(e, io.EOF) { + te.typeId = END_OF_FILE + return te + } + + return te +} + +func prependTTransportException(prepend string, e TTransportException) 
TTransportException { + return &tTransportException{ + typeId: e.TypeId(), + err: e, + msg: prepend + e.Error(), + } +} + +// isTimeoutError returns true when err is an error caused by timeout. +// +// Note that this also includes TTransportException wrapped timeout errors. +func isTimeoutError(err error) bool { + var t timeoutable + if errors.As(err, &t) { + return t.Timeout() + } + return false } diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go similarity index 100% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/type.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go diff --git a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go similarity index 93% rename from src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go index f3d42673a..259943a62 100644 --- a/src/runtime/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go @@ -23,7 +23,6 @@ import ( "compress/zlib" "context" "io" - "log" ) // TZlibTransportFactory is a factory for TZlibTransport instances @@ -67,7 +66,6 @@ func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) * func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { w, err := zlib.NewWriterLevel(trans, level) if err != nil { - log.Println(err) return nil, err } @@ -130,3 +128,10 @@ func (z *TZlibTransport) RemainingBytes() uint64 { func (z *TZlibTransport) Write(p []byte) (int, error) { return z.writer.Write(p) } + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(z.transport, conf) +} + +var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go new file mode 100644 index 000000000..88364e0a8 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go @@ -0,0 +1,349 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + keyInstrumentationLibraryName = "otel.library.name" + keyInstrumentationLibraryVersion = "otel.library.version" + keyError = "error" + keySpanKind = "span.kind" + keyStatusCode = "otel.status_code" + keyStatusMessage = "otel.status_description" + keyDroppedAttributeCount = "otel.event.dropped_attributes_count" + keyEventName = "event" +) + +// New returns an OTel Exporter implementation that exports the collected +// spans to Jaeger. +func New(endpointOption EndpointOption) (*Exporter, error) { + uploader, err := endpointOption.newBatchUploader() + if err != nil { + return nil, err + } + + // Fetch default service.name from default resource for backup + var defaultServiceName string + defaultResource := resource.Default() + if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { + defaultServiceName = value.AsString() + } + if defaultServiceName == "" { + return nil, fmt.Errorf("failed to get service name from default resource") + } + + stopCh := make(chan struct{}) + e := &Exporter{ + uploader: uploader, + stopCh: stopCh, + defaultServiceName: defaultServiceName, + } + return e, nil +} + +// Exporter exports OpenTelemetry spans to a Jaeger agent or collector. +type Exporter struct { + uploader batchUploader + stopOnce sync.Once + stopCh chan struct{} + defaultServiceName string +} + +var _ sdktrace.SpanExporter = (*Exporter)(nil) + +// ExportSpans transforms and exports OpenTelemetry spans to Jaeger. +func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + // Return fast if context is already canceled or Exporter shutdown. + select { + case <-ctx.Done(): + return ctx.Err() + case <-e.stopCh: + return nil + default: + } + + // Cancel export if Exporter is shutdown. + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + go func(ctx context.Context, cancel context.CancelFunc) { + select { + case <-ctx.Done(): + case <-e.stopCh: + cancel() + } + }(ctx, cancel) + + for _, batch := range jaegerBatchList(spans, e.defaultServiceName) { + if err := e.uploader.upload(ctx, batch); err != nil { + return err + } + } + + return nil +} + +// Shutdown stops the Exporter. This will close all connections and release +// all resources held by the Exporter. +func (e *Exporter) Shutdown(ctx context.Context) error { + // Stop any active and subsequent exports. 
+ e.stopOnce.Do(func() { close(e.stopCh) }) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return e.uploader.shutdown(ctx) +} + +func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span { + attr := ss.Attributes() + tags := make([]*gen.Tag, 0, len(attr)) + for _, kv := range attr { + tag := keyValueToTag(kv) + if tag != nil { + tags = append(tags, tag) + } + } + + if il := ss.InstrumentationLibrary(); il.Name != "" { + tags = append(tags, getStringTag(keyInstrumentationLibraryName, il.Name)) + if il.Version != "" { + tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, il.Version)) + } + } + + if ss.SpanKind() != trace.SpanKindInternal { + tags = append(tags, + getStringTag(keySpanKind, ss.SpanKind().String()), + ) + } + + if ss.Status().Code != codes.Unset { + tags = append(tags, getInt64Tag(keyStatusCode, int64(ss.Status().Code))) + if ss.Status().Description != "" { + tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description)) + } + + if ss.Status().Code == codes.Error { + tags = append(tags, getBoolTag(keyError, true)) + } + } + + var logs []*gen.Log + for _, a := range ss.Events() { + nTags := len(a.Attributes) + if a.Name != "" { + nTags++ + } + if a.DroppedAttributeCount != 0 { + nTags++ + } + fields := make([]*gen.Tag, 0, nTags) + if a.Name != "" { + // If an event contains an attribute with the same key, it needs + // to be given precedence and overwrite this. + fields = append(fields, getStringTag(keyEventName, a.Name)) + } + for _, kv := range a.Attributes { + tag := keyValueToTag(kv) + if tag != nil { + fields = append(fields, tag) + } + } + if a.DroppedAttributeCount != 0 { + fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount))) + } + logs = append(logs, &gen.Log{ + Timestamp: a.Time.UnixNano() / 1000, + Fields: fields, + }) + } + + var refs []*gen.SpanRef + for _, link := range ss.Links() { + tid := link.SpanContext.TraceID() + sid := link.SpanContext.SpanID() + refs = append(refs, &gen.SpanRef{ + TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), + SpanId: int64(binary.BigEndian.Uint64(sid[:])), + RefType: gen.SpanRefType_FOLLOWS_FROM, + }) + } + + tid := ss.SpanContext().TraceID() + sid := ss.SpanContext().SpanID() + psid := ss.Parent().SpanID() + return &gen.Span{ + TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), + SpanId: int64(binary.BigEndian.Uint64(sid[:])), + ParentSpanId: int64(binary.BigEndian.Uint64(psid[:])), + OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv" + Flags: int32(ss.SpanContext().TraceFlags()), + StartTime: ss.StartTime().UnixNano() / 1000, + Duration: ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000, + Tags: tags, + Logs: logs, + References: refs, + } +} + +func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag { + var tag *gen.Tag + switch keyValue.Value.Type() { + case attribute.STRING: + s := keyValue.Value.AsString() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VStr: &s, + VType: gen.TagType_STRING, + } + case attribute.BOOL: + b := keyValue.Value.AsBool() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VBool: &b, + VType: gen.TagType_BOOL, + } + case attribute.INT64: + i := keyValue.Value.AsInt64() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VLong: &i, + VType: gen.TagType_LONG, + } + case attribute.FLOAT64: + f := keyValue.Value.AsFloat64() + tag = &gen.Tag{ + Key: 
string(keyValue.Key), + VDouble: &f, + VType: gen.TagType_DOUBLE, + } + case attribute.BOOLSLICE, + attribute.INT64SLICE, + attribute.FLOAT64SLICE, + attribute.STRINGSLICE: + json, _ := json.Marshal(keyValue.Value.AsInterface()) + a := (string)(json) + tag = &gen.Tag{ + Key: string(keyValue.Key), + VStr: &a, + VType: gen.TagType_STRING, + } + } + return tag +} + +func getInt64Tag(k string, i int64) *gen.Tag { + return &gen.Tag{ + Key: k, + VLong: &i, + VType: gen.TagType_LONG, + } +} + +func getStringTag(k, s string) *gen.Tag { + return &gen.Tag{ + Key: k, + VStr: &s, + VType: gen.TagType_STRING, + } +} + +func getBoolTag(k string, b bool) *gen.Tag { + return &gen.Tag{ + Key: k, + VBool: &b, + VType: gen.TagType_BOOL, + } +} + +// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch. +func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch { + if len(ssl) == 0 { + return nil + } + + batchDict := make(map[attribute.Distinct]*gen.Batch) + + for _, ss := range ssl { + if ss == nil { + continue + } + + resourceKey := ss.Resource().Equivalent() + batch, bOK := batchDict[resourceKey] + if !bOK { + batch = &gen.Batch{ + Process: process(ss.Resource(), defaultServiceName), + Spans: []*gen.Span{}, + } + } + batch.Spans = append(batch.Spans, spanToThrift(ss)) + batchDict[resourceKey] = batch + } + + // Transform the categorized map into a slice + batchList := make([]*gen.Batch, 0, len(batchDict)) + for _, batch := range batchDict { + batchList = append(batchList, batch) + } + return batchList +} + +// process transforms an OTel Resource into a jaeger Process. +func process(res *resource.Resource, defaultServiceName string) *gen.Process { + var process gen.Process + + var serviceName attribute.KeyValue + if res != nil { + for iter := res.Iter(); iter.Next(); { + if iter.Attribute().Key == semconv.ServiceNameKey { + serviceName = iter.Attribute() + // Don't convert service.name into tag. + continue + } + if tag := keyValueToTag(iter.Attribute()); tag != nil { + process.Tags = append(process.Tags, tag) + } + } + } + + // If no service.name is contained in a Span's Resource, + // that field MUST be populated from the default Resource. + if serviceName.Value.AsString() == "" { + serviceName = semconv.ServiceNameKey.String(defaultServiceName) + } + process.ServiceName = serviceName.Value.AsString() + + return &process +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/reconnecting_udp_client.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go similarity index 98% rename from src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/reconnecting_udp_client.go rename to src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go index bc4fcd3a8..ae4bf87f5 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/reconnecting_udp_client.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go @@ -11,7 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" import ( "fmt" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go new file mode 100644 index 000000000..26fbf1cb6 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go @@ -0,0 +1,314 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" + + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" +) + +// batchUploader send a batch of spans to Jaeger +type batchUploader interface { + upload(context.Context, *gen.Batch) error + shutdown(context.Context) error +} + +type EndpointOption interface { + newBatchUploader() (batchUploader, error) +} + +type endpointOptionFunc func() (batchUploader, error) + +func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) { + return fn() +} + +// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent +// over compact thrift protocol. This will use the following environment variables for +// configuration if no explicit option is provided: +// +// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host +// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port +// +// The passed options will take precedence over any environment variables and default values +// will be used if neither are provided. +func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + cfg := &agentEndpointConfig{ + agentClientUDPParams{ + AttemptReconnecting: true, + Host: envOr(envAgentHost, "localhost"), + Port: envOr(envAgentPort, "6831"), + }, + } + for _, opt := range options { + opt.apply(cfg) + } + + client, err := newAgentClientUDP(cfg.agentClientUDPParams) + if err != nil { + return nil, err + } + + return &agentUploader{client: client}, nil + }) +} + +type AgentEndpointOption interface { + apply(*agentEndpointConfig) +} + +type agentEndpointConfig struct { + agentClientUDPParams +} + +type agentEndpointOptionFunc func(*agentEndpointConfig) + +func (fn agentEndpointOptionFunc) apply(cfg *agentEndpointConfig) { + fn(cfg) +} + +// WithAgentHost sets a host to be used in the agent client endpoint. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable. +// If this option is not passed and the env var is not set, "localhost" will be used by default. 
+func WithAgentHost(host string) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.Host = host + }) +} + +// WithAgentPort sets a port to be used in the agent client endpoint. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable. +// If this option is not passed and the env var is not set, "6831" will be used by default. +func WithAgentPort(port string) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.Port = port + }) +} + +// WithLogger sets a logger to be used by agent client. +func WithLogger(logger *log.Logger) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.Logger = logger + }) +} + +// WithDisableAttemptReconnecting sets option to disable reconnecting udp client. +func WithDisableAttemptReconnecting() AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.AttemptReconnecting = false + }) +} + +// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint. +func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.AttemptReconnectInterval = interval + }) +} + +// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent. +func WithMaxPacketSize(size int) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.MaxPacketSize = size + }) +} + +// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will +// use the following environment variables for configuration if no explicit option is provided: +// +// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector. +// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint. +// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint. +// +// The passed options will take precedence over any environment variables. +// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used. +// If neither values are provided for the username or the password, they will not be set since there is no default. +func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + cfg := &collectorEndpointConfig{ + endpoint: envOr(envEndpoint, "http://localhost:14268/api/traces"), + username: envOr(envUser, ""), + password: envOr(envPassword, ""), + httpClient: http.DefaultClient, + } + + for _, opt := range options { + opt.apply(cfg) + } + + return &collectorUploader{ + endpoint: cfg.endpoint, + username: cfg.username, + password: cfg.password, + httpClient: cfg.httpClient, + }, nil + }) +} + +type CollectorEndpointOption interface { + apply(*collectorEndpointConfig) +} + +type collectorEndpointConfig struct { + // endpoint for sending spans directly to a collector. + endpoint string + + // username to be used for authentication with the collector endpoint. + username string + + // password to be used for authentication with the collector endpoint. + password string + + // httpClient to be used to make requests to the collector endpoint. 
+ httpClient *http.Client +} + +type collectorEndpointOptionFunc func(*collectorEndpointConfig) + +func (fn collectorEndpointOptionFunc) apply(cfg *collectorEndpointConfig) { + fn(cfg) +} + +// WithEndpoint is the URL for the Jaeger collector that spans are sent to. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable. +// If this option is not passed and the environment variable is not set, +// "http://localhost:14268/api/traces" will be used by default. +func WithEndpoint(endpoint string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.endpoint = endpoint + }) +} + +// WithUsername sets the username to be used in the authorization header sent for all requests to the collector. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_USER environment variable. +// If this option is not passed and the environment variable is not set, no username will be set. +func WithUsername(username string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.username = username + }) +} + +// WithPassword sets the password to be used in the authorization header sent for all requests to the collector. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_PASSWORD environment variable. +// If this option is not passed and the environment variable is not set, no password will be set. +func WithPassword(password string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.password = password + }) +} + +// WithHTTPClient sets the http client to be used to make request to the collector endpoint. +func WithHTTPClient(client *http.Client) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.httpClient = client + }) +} + +// agentUploader implements batchUploader interface sending batches to +// Jaeger through the UDP agent. +type agentUploader struct { + client *agentClientUDP +} + +var _ batchUploader = (*agentUploader)(nil) + +func (a *agentUploader) shutdown(ctx context.Context) error { + done := make(chan error, 1) + go func() { + done <- a.client.Close() + }() + + select { + case <-ctx.Done(): + // Prioritize not blocking the calling thread and just leak the + // spawned goroutine to close the client. + return ctx.Err() + case err := <-done: + return err + } +} + +func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error { + return a.client.EmitBatch(ctx, batch) +} + +// collectorUploader implements batchUploader interface sending batches to +// Jaeger through the collector http endpoint. +type collectorUploader struct { + endpoint string + username string + password string + httpClient *http.Client +} + +var _ batchUploader = (*collectorUploader)(nil) + +func (c *collectorUploader) shutdown(ctx context.Context) error { + // The Exporter will cancel any active exports and will prevent all + // subsequent exports, so nothing to do here. 
+ return nil +} + +func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error { + body, err := serialize(batch) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body) + if err != nil { + return err + } + if c.username != "" && c.password != "" { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Content-Type", "application/x-thrift") + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + + _, _ = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) + } + return nil +} + +func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { + buf := thrift.NewTMemoryBuffer() + if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil { + return nil, err + } + return buf.Buffer, nil +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/README.md b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/README.md deleted file mode 100644 index f0c58564a..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# OpenTelemetry-Go Jaeger Exporter - -OpenTelemetry Jaeger exporter - -## Installation -``` -go get -u go.opentelemetry.io/otel/exporters/trace/jaeger -``` diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/agent.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/agent.go deleted file mode 100644 index f21a64687..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/agent.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" - -import ( - "fmt" - "io" - "log" - "net" - "time" - - "github.com/apache/thrift/lib/go/thrift" - - gen "go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger" -) - -// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent -const udpPacketMaxLength = 65000 - -// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. -type agentClientUDP struct { - gen.Agent - io.Closer - - connUDP udpConn - client *gen.AgentClient - maxPacketSize int // max size of datagram in bytes - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span -} - -type udpConn interface { - Write([]byte) (int, error) - SetWriteBuffer(int) error - Close() error -} - -type agentClientUDPParams struct { - HostPort string - MaxPacketSize int - Logger *log.Logger - AttemptReconnecting bool - AttemptReconnectInterval time.Duration -} - -// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
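
For context on how the relocated exporter is consumed, below is a minimal sketch of wiring the new `go.opentelemetry.io/otel/exporters/jaeger` v1.0.0 package into a tracer provider. It is illustrative only: the `jaeger.New` constructor call, the localhost collector endpoint, and the tracer/span names are assumptions for the example and are not taken from this diff.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/jaeger"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Build an exporter that sends spans to a Jaeger collector over HTTP/Thrift.
	// The endpoint matches the package default shown in the vendored code and is
	// only illustrative; WithAgentEndpoint could be used instead for UDP export.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://localhost:14268/api/traces"),
	))
	if err != nil {
		log.Fatal(err)
	}

	// Register a tracer provider that batches spans through the exporter.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp)
	defer func() {
		// Flush any buffered spans before exit.
		if err := tp.Shutdown(context.Background()); err != nil {
			log.Print(err)
		}
	}()

	// Emit one example span ("example" / "demo" are placeholder names).
	_, span := tp.Tracer("example").Start(context.Background(), "demo")
	span.End()
}
```
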
-func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) { - // validate hostport - if _, _, err := net.SplitHostPort(params.HostPort); err != nil { - return nil, err - } - - if params.MaxPacketSize <= 0 { - params.MaxPacketSize = udpPacketMaxLength - } - - if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 { - params.AttemptReconnectInterval = time.Second * 30 - } - - thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) - protocolFactory := thrift.NewTCompactProtocolFactory() - client := gen.NewAgentClientFactory(thriftBuffer, protocolFactory) - - var connUDP udpConn - var err error - - if params.AttemptReconnecting { - // host is hostname, setup resolver loop in case host record changes during operation - connUDP, err = newReconnectingUDPConn(params.HostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) - if err != nil { - return nil, err - } - } else { - destAddr, err := net.ResolveUDPAddr("udp", params.HostPort) - if err != nil { - return nil, err - } - - connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err - } - } - - if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { - return nil, err - } - - return &agentClientUDP{ - connUDP: connUDP, - client: client, - maxPacketSize: params.MaxPacketSize, - thriftBuffer: thriftBuffer, - }, nil -} - -// EmitBatch implements EmitBatch() of Agent interface -func (a *agentClientUDP) EmitBatch(batch *gen.Batch) error { - a.thriftBuffer.Reset() - a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages - if err := a.client.EmitBatch(batch); err != nil { - return err - } - if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", - a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) - } - _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) - return err -} - -// Close implements Close() of io.Closer and closes the underlying UDP connection. -func (a *agentClientUDP) Close() error { - return a.connUDP.Close() -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/env.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/env.go deleted file mode 100644 index d2f7fe284..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/env.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" - -import ( - "errors" - "os" - "strconv" - "strings" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/label" -) - -// Environment variable names -const ( - // The service name. - envServiceName = "JAEGER_SERVICE_NAME" - // Whether the exporter is disabled or not. (default false). 
- envDisabled = "JAEGER_DISABLED" - // A comma separated list of name=value tracer-level tags, which get added to all reported spans. - // The value can also refer to an environment variable using the format ${envVarName:defaultValue}. - envTags = "JAEGER_TAGS" - // The HTTP endpoint for sending spans directly to a collector, - // i.e. http://jaeger-collector:14268/api/traces. - envEndpoint = "JAEGER_ENDPOINT" - // Username to send as part of "Basic" authentication to the collector endpoint. - envUser = "JAEGER_USER" - // Password to send as part of "Basic" authentication to the collector endpoint. - envPassword = "JAEGER_PASSWORD" -) - -// CollectorEndpointFromEnv return environment variable value of JAEGER_ENDPOINT -func CollectorEndpointFromEnv() string { - return os.Getenv(envEndpoint) -} - -// WithCollectorEndpointOptionFromEnv uses environment variables to set the username and password -// if basic auth is required. -func WithCollectorEndpointOptionFromEnv() CollectorEndpointOption { - return func(o *CollectorEndpointOptions) { - if e := os.Getenv(envUser); e != "" { - o.username = e - } - if e := os.Getenv(envPassword); e != "" { - o.password = os.Getenv(envPassword) - } - } -} - -// WithDisabledFromEnv uses environment variables and overrides disabled field. -func WithDisabledFromEnv() Option { - return func(o *options) { - if e := os.Getenv(envDisabled); e != "" { - if v, err := strconv.ParseBool(e); err == nil { - o.Disabled = v - } - } - } -} - -// ProcessFromEnv parse environment variables into jaeger exporter's Process. -// It will return a nil tag slice if the environment variable JAEGER_TAGS is malformed. -func ProcessFromEnv() Process { - var p Process - if e := os.Getenv(envServiceName); e != "" { - p.ServiceName = e - } - if e := os.Getenv(envTags); e != "" { - tags, err := parseTags(e) - if err != nil { - otel.Handle(err) - } else { - p.Tags = tags - } - } - - return p -} - -// WithProcessFromEnv uses environment variables and overrides jaeger exporter's Process. -func WithProcessFromEnv() Option { - return func(o *options) { - p := ProcessFromEnv() - if p.ServiceName != "" { - o.Process.ServiceName = p.ServiceName - } - if len(p.Tags) != 0 { - o.Process.Tags = p.Tags - } - } -} - -var errTagValueNotFound = errors.New("missing tag value") -var errTagEnvironmentDefaultValueNotFound = errors.New("missing default value for tag environment value") - -// parseTags parses the given string into a collection of Tags. 
-// Spec for this value: -// - comma separated list of key=value -// - value can be specified using the notation ${envVar:defaultValue}, where `envVar` -// is an environment variable and `defaultValue` is the value to use in case the env var is not set -func parseTags(sTags string) ([]label.KeyValue, error) { - pairs := strings.Split(sTags, ",") - tags := make([]label.KeyValue, len(pairs)) - for i, p := range pairs { - field := strings.SplitN(p, "=", 2) - if len(field) != 2 { - return nil, errTagValueNotFound - } - k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) - - if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { - ed := strings.SplitN(v[2:len(v)-1], ":", 2) - if len(ed) != 2 { - return nil, errTagEnvironmentDefaultValueNotFound - } - e, d := ed[0], ed[1] - v = os.Getenv(e) - if v == "" && d != "" { - v = d - } - } - - tags[i] = parseKeyValue(k, v) - } - - return tags, nil -} - -func parseKeyValue(k, v string) label.KeyValue { - return label.KeyValue{ - Key: label.Key(k), - Value: parseValue(v), - } -} - -func parseValue(str string) label.Value { - if v, err := strconv.ParseInt(str, 10, 64); err == nil { - return label.Int64Value(v) - } - if v, err := strconv.ParseFloat(str, 64); err == nil { - return label.Float64Value(v) - } - if v, err := strconv.ParseBool(str); err == nil { - return label.BoolValue(v) - } - - // Fallback - return label.StringValue(str) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/go.mod b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/go.mod deleted file mode 100644 index deb27449b..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/go.mod +++ /dev/null @@ -1,17 +0,0 @@ -module go.opentelemetry.io/otel/exporters/trace/jaeger - -go 1.14 - -replace ( - go.opentelemetry.io/otel => ../../.. 
- go.opentelemetry.io/otel/sdk => ../../../sdk -) - -require ( - github.com/apache/thrift v0.13.0 - github.com/google/go-cmp v0.5.4 - github.com/stretchr/testify v1.6.1 - go.opentelemetry.io/otel v0.15.0 - go.opentelemetry.io/otel/sdk v0.15.0 - google.golang.org/api v0.32.0 -) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/go.sum b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/go.sum deleted file mode 100644 index 5ef4eb0bc..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/go.sum +++ /dev/null @@ -1,399 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= -github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf 
v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858 h1:xLt+iB5ksWcZVxqc+g9K41ZHy+6MKWfXCDsjSThnsPA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go deleted file mode 100644 index 345a65acb..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -var GoUnusedProtection__ int diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/agent.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/agent.go deleted file mode 100644 index 88d2df576..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger/agent.go +++ /dev/null @@ -1,244 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "context" - "fmt" - - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type Agent interface { - // Parameters: - // - Batch - EmitBatch(batch *Batch) (err error) -} - -type AgentClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(batch *Batch) (err error) { - if err = p.sendEmitBatch(batch); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitBatchArgs{ - Batch: batch, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush(context.Background()) -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self0 -} - -func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - ctx := context.Background() - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x1.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x1 -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *Batch `thrift:"batch,1" json:"batch"` 
-} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *Batch - -func (p *AgentEmitBatchArgs) GetBatch() *Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { - p.Batch = &Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/jaeger.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/jaeger.go deleted file mode 100644 index bcee6b8e2..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/jaeger.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" - -import ( - "context" - "encoding/binary" - "fmt" - "sync" - - "google.golang.org/api/support/bundler" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/codes" - gen "go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger" - "go.opentelemetry.io/otel/label" - export "go.opentelemetry.io/otel/sdk/export/trace" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" -) - -const ( - defaultServiceName = "OpenTelemetry" - - keyInstrumentationLibraryName = "otel.instrumentation_library.name" - keyInstrumentationLibraryVersion = "otel.instrumentation_library.version" -) - -type Option func(*options) - -// options are the options to be used when initializing a Jaeger export. -type options struct { - // Process contains the information about the exporting process. - Process Process - - // BufferMaxCount defines the total number of traces that can be buffered in memory - BufferMaxCount int - - // BatchMaxCount defines the maximum number of spans sent in one batch - BatchMaxCount int - - Config *sdktrace.Config - - Disabled bool -} - -// WithProcess sets the process with the information about the exporting process. -func WithProcess(process Process) Option { - return func(o *options) { - o.Process = process - } -} - -// WithBufferMaxCount defines the total number of traces that can be buffered in memory -func WithBufferMaxCount(bufferMaxCount int) Option { - return func(o *options) { - o.BufferMaxCount = bufferMaxCount - } -} - -// WithBatchMaxCount defines the maximum number of spans in one batch -func WithBatchMaxCount(batchMaxCount int) Option { - return func(o *options) { - o.BatchMaxCount = batchMaxCount - } -} - -// WithSDK sets the SDK config for the exporter pipeline. -func WithSDK(config *sdktrace.Config) Option { - return func(o *options) { - o.Config = config - } -} - -// WithDisabled option will cause pipeline methods to use -// a no-op provider -func WithDisabled(disabled bool) Option { - return func(o *options) { - o.Disabled = disabled - } -} - -// NewRawExporter returns an OTel Exporter implementation that exports the -// collected spans to Jaeger. -// -// It will IGNORE Disabled option. -func NewRawExporter(endpointOption EndpointOption, opts ...Option) (*Exporter, error) { - uploader, err := endpointOption() - if err != nil { - return nil, err - } - - o := options{} - opts = append(opts, WithProcessFromEnv()) - for _, opt := range opts { - opt(&o) - } - - service := o.Process.ServiceName - if service == "" { - service = defaultServiceName - } - tags := make([]*gen.Tag, 0, len(o.Process.Tags)) - for _, tag := range o.Process.Tags { - t := keyValueToTag(tag) - if t != nil { - tags = append(tags, t) - } - } - e := &Exporter{ - uploader: uploader, - process: &gen.Process{ - ServiceName: service, - Tags: tags, - }, - o: o, - } - bundler := bundler.NewBundler((*gen.Span)(nil), func(bundle interface{}) { - if err := e.upload(bundle.([]*gen.Span)); err != nil { - otel.Handle(err) - } - }) - - // Set BufferedByteLimit with the total number of spans that are permissible to be held in memory. - // This needs to be done since the size of messages is always set to 1. Failing to set this would allow - // 1G messages to be held in memory since that is the default value of BufferedByteLimit. 
- if o.BufferMaxCount != 0 { - bundler.BufferedByteLimit = o.BufferMaxCount - } - - // The default value bundler uses is 10, increase to send larger batches - if o.BatchMaxCount != 0 { - bundler.BundleCountThreshold = o.BatchMaxCount - } - - e.bundler = bundler - return e, nil -} - -// NewExportPipeline sets up a complete export pipeline -// with the recommended setup for trace provider -func NewExportPipeline(endpointOption EndpointOption, opts ...Option) (trace.TracerProvider, func(), error) { - o := options{} - opts = append(opts, WithDisabledFromEnv()) - for _, opt := range opts { - opt(&o) - } - if o.Disabled { - return trace.NewNoopTracerProvider(), func() {}, nil - } - - exporter, err := NewRawExporter(endpointOption, opts...) - if err != nil { - return nil, nil, err - } - - pOpts := []sdktrace.TracerProviderOption{sdktrace.WithSyncer(exporter)} - if exporter.o.Config != nil { - pOpts = append(pOpts, sdktrace.WithConfig(*exporter.o.Config)) - } - tp := sdktrace.NewTracerProvider(pOpts...) - return tp, exporter.Flush, nil -} - -// InstallNewPipeline instantiates a NewExportPipeline with the -// recommended configuration and registers it globally. -func InstallNewPipeline(endpointOption EndpointOption, opts ...Option) (func(), error) { - tp, flushFn, err := NewExportPipeline(endpointOption, opts...) - if err != nil { - return nil, err - } - - otel.SetTracerProvider(tp) - return flushFn, nil -} - -// Process contains the information exported to jaeger about the source -// of the trace data. -type Process struct { - // ServiceName is the Jaeger service name. - ServiceName string - - // Tags are added to Jaeger Process exports - Tags []label.KeyValue -} - -// Exporter is an implementation of an OTel SpanSyncer that uploads spans to -// Jaeger. -type Exporter struct { - process *gen.Process - bundler *bundler.Bundler - uploader batchUploader - o options - - stoppedMu sync.RWMutex - stopped bool -} - -var _ export.SpanExporter = (*Exporter)(nil) - -// ExportSpans exports SpanData to Jaeger. -func (e *Exporter) ExportSpans(ctx context.Context, spans []*export.SpanData) error { - e.stoppedMu.RLock() - stopped := e.stopped - e.stoppedMu.RUnlock() - if stopped { - return nil - } - - for _, span := range spans { - // TODO(jbd): Handle oversized bundlers. - err := e.bundler.Add(spanDataToThrift(span), 1) - if err != nil { - return fmt.Errorf("failed to bundle %q: %w", span.Name, err) - } - } - return nil -} - -// flush is used to wrap the bundler's Flush method for testing. -var flush = func(e *Exporter) { - e.bundler.Flush() -} - -// Shutdown stops the exporter flushing any pending exports. -func (e *Exporter) Shutdown(ctx context.Context) error { - e.stoppedMu.Lock() - e.stopped = true - e.stoppedMu.Unlock() - - done := make(chan struct{}, 1) - // Shadow so if the goroutine is leaked in testing it doesn't cause a race - // condition when the file level var is reset. - go func(FlushFunc func(*Exporter)) { - // The OpenTelemetry specification is explicit in not having this - // method block so the preference here is to orphan this goroutine if - // the context is canceled or times out while this flushing is - // occurring. This is a consequence of the bundler Flush method not - // supporting a context. 
- FlushFunc(e) - done <- struct{}{} - }(flush) - select { - case <-ctx.Done(): - return ctx.Err() - case <-done: - } - return nil -} - -func spanDataToThrift(data *export.SpanData) *gen.Span { - tags := make([]*gen.Tag, 0, len(data.Attributes)) - for _, kv := range data.Attributes { - tag := keyValueToTag(kv) - if tag != nil { - tags = append(tags, tag) - } - } - - // TODO (jmacd): OTel has a broad "last value wins" - // semantic. Should resources be appended before span - // attributes, above, to allow span attributes to - // overwrite resource attributes? - if data.Resource != nil { - for iter := data.Resource.Iter(); iter.Next(); { - if tag := keyValueToTag(iter.Attribute()); tag != nil { - tags = append(tags, tag) - } - } - } - if il := data.InstrumentationLibrary; il.Name != "" { - tags = append(tags, getStringTag(keyInstrumentationLibraryName, il.Name)) - if il.Version != "" { - tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, il.Version)) - } - } - - tags = append(tags, - getInt64Tag("status.code", int64(data.StatusCode)), - getStringTag("status.message", data.StatusMessage), - getStringTag("span.kind", data.SpanKind.String()), - ) - - // Ensure that if Status.Code is not OK, that we set the "error" tag on the Jaeger span. - // See Issue https://github.com/census-instrumentation/opencensus-go/issues/1041 - if data.StatusCode != codes.Ok && data.StatusCode != codes.Unset { - tags = append(tags, getBoolTag("error", true)) - } - - var logs []*gen.Log - for _, a := range data.MessageEvents { - fields := make([]*gen.Tag, 0, len(a.Attributes)) - for _, kv := range a.Attributes { - tag := keyValueToTag(kv) - if tag != nil { - fields = append(fields, tag) - } - } - fields = append(fields, getStringTag("name", a.Name)) - logs = append(logs, &gen.Log{ - Timestamp: a.Time.UnixNano() / 1000, - Fields: fields, - }) - } - - var refs []*gen.SpanRef - for _, link := range data.Links { - refs = append(refs, &gen.SpanRef{ - TraceIdHigh: int64(binary.BigEndian.Uint64(link.TraceID[0:8])), - TraceIdLow: int64(binary.BigEndian.Uint64(link.TraceID[8:16])), - SpanId: int64(binary.BigEndian.Uint64(link.SpanID[:])), - // TODO(paivagustavo): properly set the reference type when specs are defined - // see https://github.com/open-telemetry/opentelemetry-specification/issues/65 - RefType: gen.SpanRefType_CHILD_OF, - }) - } - - return &gen.Span{ - TraceIdHigh: int64(binary.BigEndian.Uint64(data.SpanContext.TraceID[0:8])), - TraceIdLow: int64(binary.BigEndian.Uint64(data.SpanContext.TraceID[8:16])), - SpanId: int64(binary.BigEndian.Uint64(data.SpanContext.SpanID[:])), - ParentSpanId: int64(binary.BigEndian.Uint64(data.ParentSpanID[:])), - OperationName: data.Name, // TODO: if span kind is added then add prefix "Sent"/"Recv" - Flags: int32(data.SpanContext.TraceFlags), - StartTime: data.StartTime.UnixNano() / 1000, - Duration: data.EndTime.Sub(data.StartTime).Nanoseconds() / 1000, - Tags: tags, - Logs: logs, - References: refs, - } -} - -func keyValueToTag(keyValue label.KeyValue) *gen.Tag { - var tag *gen.Tag - switch keyValue.Value.Type() { - case label.STRING: - s := keyValue.Value.AsString() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VStr: &s, - VType: gen.TagType_STRING, - } - case label.BOOL: - b := keyValue.Value.AsBool() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VBool: &b, - VType: gen.TagType_BOOL, - } - case label.INT32: - i := int64(keyValue.Value.AsInt32()) - tag = &gen.Tag{ - Key: string(keyValue.Key), - VLong: &i, - VType: gen.TagType_LONG, - } - case label.INT64: - i := 
keyValue.Value.AsInt64() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VLong: &i, - VType: gen.TagType_LONG, - } - case label.UINT32: - i := int64(keyValue.Value.AsUint32()) - tag = &gen.Tag{ - Key: string(keyValue.Key), - VLong: &i, - VType: gen.TagType_LONG, - } - case label.UINT64: - // we'll ignore the value if it overflows - if i := int64(keyValue.Value.AsUint64()); i >= 0 { - tag = &gen.Tag{ - Key: string(keyValue.Key), - VLong: &i, - VType: gen.TagType_LONG, - } - } - case label.FLOAT32: - f := float64(keyValue.Value.AsFloat32()) - tag = &gen.Tag{ - Key: string(keyValue.Key), - VDouble: &f, - VType: gen.TagType_DOUBLE, - } - case label.FLOAT64: - f := keyValue.Value.AsFloat64() - tag = &gen.Tag{ - Key: string(keyValue.Key), - VDouble: &f, - VType: gen.TagType_DOUBLE, - } - } - return tag -} - -func getInt64Tag(k string, i int64) *gen.Tag { - return &gen.Tag{ - Key: k, - VLong: &i, - VType: gen.TagType_LONG, - } -} - -func getStringTag(k, s string) *gen.Tag { - return &gen.Tag{ - Key: k, - VStr: &s, - VType: gen.TagType_STRING, - } -} - -func getBoolTag(k string, b bool) *gen.Tag { - return &gen.Tag{ - Key: k, - VBool: &b, - VType: gen.TagType_BOOL, - } -} - -// Flush waits for exported trace spans to be uploaded. -// -// This is useful if your program is ending and you do not want to lose recent spans. -func (e *Exporter) Flush() { - flush(e) -} - -func (e *Exporter) upload(spans []*gen.Span) error { - batch := &gen.Batch{ - Spans: spans, - Process: e.process, - } - - return e.uploader.upload(batch) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/uploader.go b/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/uploader.go deleted file mode 100644 index 8cde86516..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/exporters/trace/jaeger/uploader.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "time" - - "github.com/apache/thrift/lib/go/thrift" - - gen "go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger" -) - -// batchUploader send a batch of spans to Jaeger -type batchUploader interface { - upload(batch *gen.Batch) error -} - -type EndpointOption func() (batchUploader, error) - -// WithAgentEndpoint instructs exporter to send spans to jaeger-agent at this address. -// For example, localhost:6831. 
-func WithAgentEndpoint(agentEndpoint string, options ...AgentEndpointOption) EndpointOption { - return func() (batchUploader, error) { - if agentEndpoint == "" { - return nil, errors.New("agentEndpoint must not be empty") - } - - o := &AgentEndpointOptions{ - agentClientUDPParams{ - HostPort: agentEndpoint, - AttemptReconnecting: true, - }, - } - - for _, opt := range options { - opt(o) - } - - client, err := newAgentClientUDP(o.agentClientUDPParams) - if err != nil { - return nil, err - } - - return &agentUploader{client: client}, nil - } -} - -type AgentEndpointOption func(o *AgentEndpointOptions) - -type AgentEndpointOptions struct { - agentClientUDPParams -} - -// WithLogger sets a logger to be used by agent client. -func WithLogger(logger *log.Logger) AgentEndpointOption { - return func(o *AgentEndpointOptions) { - o.Logger = logger - } -} - -// WithDisableAttemptReconnecting sets option to disable reconnecting udp client. -func WithDisableAttemptReconnecting() AgentEndpointOption { - return func(o *AgentEndpointOptions) { - o.AttemptReconnecting = false - } -} - -// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint. -func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption { - return func(o *AgentEndpointOptions) { - o.AttemptReconnectInterval = interval - } -} - -// WithCollectorEndpoint defines the full url to the Jaeger HTTP Thrift collector. -// For example, http://localhost:14268/api/traces -func WithCollectorEndpoint(collectorEndpoint string, options ...CollectorEndpointOption) EndpointOption { - return func() (batchUploader, error) { - // Overwrite collector endpoint if environment variables are available. - if e := CollectorEndpointFromEnv(); e != "" { - collectorEndpoint = e - } - - if collectorEndpoint == "" { - return nil, errors.New("collectorEndpoint must not be empty") - } - - o := &CollectorEndpointOptions{ - httpClient: http.DefaultClient, - } - - options = append(options, WithCollectorEndpointOptionFromEnv()) - for _, opt := range options { - opt(o) - } - - return &collectorUploader{ - endpoint: collectorEndpoint, - username: o.username, - password: o.password, - httpClient: o.httpClient, - }, nil - } -} - -type CollectorEndpointOption func(o *CollectorEndpointOptions) - -type CollectorEndpointOptions struct { - // username to be used if basic auth is required. - username string - - // password to be used if basic auth is required. - password string - - // httpClient to be used to make requests to the collector endpoint. - httpClient *http.Client -} - -// WithUsername sets the username to be used if basic auth is required. -func WithUsername(username string) CollectorEndpointOption { - return func(o *CollectorEndpointOptions) { - o.username = username - } -} - -// WithPassword sets the password to be used if basic auth is required. -func WithPassword(password string) CollectorEndpointOption { - return func(o *CollectorEndpointOptions) { - o.password = password - } -} - -// WithHTTPClient sets the http client to be used to make request to the collector endpoint. -func WithHTTPClient(client *http.Client) CollectorEndpointOption { - return func(o *CollectorEndpointOptions) { - o.httpClient = client - } -} - -// agentUploader implements batchUploader interface sending batches to -// Jaeger through the UDP agent. 
-type agentUploader struct { - client *agentClientUDP -} - -var _ batchUploader = (*agentUploader)(nil) - -func (a *agentUploader) upload(batch *gen.Batch) error { - return a.client.EmitBatch(batch) -} - -// collectorUploader implements batchUploader interface sending batches to -// Jaeger through the collector http endpoint. -type collectorUploader struct { - endpoint string - username string - password string - httpClient *http.Client -} - -var _ batchUploader = (*collectorUploader)(nil) - -func (c *collectorUploader) upload(batch *gen.Batch) error { - body, err := serialize(batch) - if err != nil { - return err - } - req, err := http.NewRequest("POST", c.endpoint, body) - if err != nil { - return err - } - if c.username != "" && c.password != "" { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Content-Type", "application/x-thrift") - - resp, err := c.httpClient.Do(req) - if err != nil { - return err - } - - _, _ = io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) - } - return nil -} - -func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { - buf := thrift.NewTMemoryBuffer() - if err := obj.Write(thrift.NewTBinaryProtocolTransport(buf)); err != nil { - return nil, err - } - return buf.Buffer, nil -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/src/runtime/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index d1c892f0e..9a58fb1d3 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/src/runtime/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -23,14 +23,19 @@ fi p=$(pwd) mod_dirs=() -mapfile -t mod_dirs < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) for mod_dir in "${mod_dirs[@]}"; do cd "${mod_dir}" - main_dirs=() - mapfile -t main_dirs < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') - for main_dir in "${main_dirs[@]}"; do - echo ".${main_dir#${p}}" - done + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... 
| grep '^main|' | cut -f 2- -d '|') cd "${p}" done diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/go.mod b/src/runtime/vendor/go.opentelemetry.io/otel/go.mod index 184774636..447addf6b 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/go.mod +++ b/src/runtime/vendor/go.opentelemetry.io/otel/go.mod @@ -1,8 +1,71 @@ module go.opentelemetry.io/otel -go 1.14 +go 1.15 require ( - github.com/google/go-cmp v0.5.4 - github.com/stretchr/testify v1.6.1 + github.com/google/go-cmp v0.5.6 + github.com/stretchr/testify v1.7.0 + go.opentelemetry.io/otel/trace v1.0.0 ) + +replace go.opentelemetry.io/otel => ./ + +replace go.opentelemetry.io/otel/bridge/opencensus => ./bridge/opencensus + +replace go.opentelemetry.io/otel/bridge/opentracing => ./bridge/opentracing + +replace go.opentelemetry.io/otel/example/jaeger => ./example/jaeger + +replace go.opentelemetry.io/otel/example/namedtracer => ./example/namedtracer + +replace go.opentelemetry.io/otel/example/opencensus => ./example/opencensus + +replace go.opentelemetry.io/otel/example/otel-collector => ./example/otel-collector + +replace go.opentelemetry.io/otel/example/prom-collector => ./example/prom-collector + +replace go.opentelemetry.io/otel/example/prometheus => ./example/prometheus + +replace go.opentelemetry.io/otel/example/zipkin => ./example/zipkin + +replace go.opentelemetry.io/otel/exporters/prometheus => ./exporters/prometheus + +replace go.opentelemetry.io/otel/exporters/jaeger => ./exporters/jaeger + +replace go.opentelemetry.io/otel/exporters/zipkin => ./exporters/zipkin + +replace go.opentelemetry.io/otel/internal/tools => ./internal/tools + +replace go.opentelemetry.io/otel/sdk => ./sdk + +replace go.opentelemetry.io/otel/internal/metric => ./internal/metric + +replace go.opentelemetry.io/otel/metric => ./metric + +replace go.opentelemetry.io/otel/sdk/export/metric => ./sdk/export/metric + +replace go.opentelemetry.io/otel/sdk/metric => ./sdk/metric + +replace go.opentelemetry.io/otel/trace => ./trace + +replace go.opentelemetry.io/otel/example/passthrough => ./example/passthrough + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ./exporters/otlp/otlptrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ./exporters/otlp/otlptrace/otlptracegrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ./exporters/otlp/otlptrace/otlptracehttp + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ./exporters/otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ./exporters/otlp/otlpmetric/otlpmetricgrpc + +replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ./exporters/stdout/stdoutmetric + +replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ./exporters/stdout/stdouttrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ./exporters/otlp/otlpmetric/otlpmetrichttp + +replace go.opentelemetry.io/otel/bridge/opencensus/test => ./bridge/opencensus/test + +replace go.opentelemetry.io/otel/example/fib => ./example/fib diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/go.sum b/src/runtime/vendor/go.opentelemetry.io/otel/go.sum index 76002a62e..f212493d5 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/go.sum +++ b/src/runtime/vendor/go.opentelemetry.io/otel/go.sum @@ -1,12 +1,12 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/handler.go b/src/runtime/vendor/go.opentelemetry.io/otel/handler.go index 27e1caa30..238f9de12 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/handler.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/handler.go @@ -26,36 +26,42 @@ var ( // throughout an OpenTelemetry instrumented project. When a user // specified ErrorHandler is registered (`SetErrorHandler`) all calls to // `Handle` and will be delegated to the registered ErrorHandler. - globalErrorHandler = &loggingErrorHandler{ - l: log.New(os.Stderr, "", log.LstdFlags), - } + globalErrorHandler = defaultErrorHandler() // delegateErrorHandlerOnce ensures that a user provided ErrorHandler is // only ever registered once. delegateErrorHandlerOnce sync.Once - // Comiple time check that loggingErrorHandler implements ErrorHandler. - _ ErrorHandler = (*loggingErrorHandler)(nil) + // Compile-time check that delegator implements ErrorHandler. + _ ErrorHandler = (*delegator)(nil) ) -// loggingErrorHandler logs all errors to STDERR. -type loggingErrorHandler struct { +type holder struct { + eh ErrorHandler +} + +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(holder{eh: &delegator{l: log.New(os.Stderr, "", log.LstdFlags)}}) + return v +} + +// delegator logs errors if no delegate is set, otherwise they are delegated. +type delegator struct { delegate atomic.Value l *log.Logger } -// setDelegate sets the ErrorHandler delegate if one is not already set. -func (h *loggingErrorHandler) setDelegate(d ErrorHandler) { - if h.delegate.Load() != nil { - // Delegate already registered - return - } +// setDelegate sets the ErrorHandler delegate. +func (h *delegator) setDelegate(d ErrorHandler) { + // It is critical this is guarded with delegateErrorHandlerOnce, if it is + // called again with a different concrete type it will panic. h.delegate.Store(d) } -// Handle implements ErrorHandler. -func (h *loggingErrorHandler) Handle(err error) { +// Handle logs err if no delegate is set, otherwise it is delegated. 
+func (h *delegator) Handle(err error) { if d := h.delegate.Load(); d != nil { d.(ErrorHandler).Handle(err) return @@ -63,27 +69,39 @@ func (h *loggingErrorHandler) Handle(err error) { h.l.Print(err) } -// GetErrorHandler returns the global ErrorHandler instance. If no ErrorHandler -// instance has been set (`SetErrorHandler`), the default ErrorHandler which -// logs errors to STDERR is returned. +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. func GetErrorHandler() ErrorHandler { - return globalErrorHandler + return globalErrorHandler.Load().(holder).eh } -// SetErrorHandler sets the global ErrorHandler to be h. +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. func SetErrorHandler(h ErrorHandler) { delegateErrorHandlerOnce.Do(func() { current := GetErrorHandler() if current == h { return } - if internalHandler, ok := current.(*loggingErrorHandler); ok { + if internalHandler, ok := current.(*delegator); ok { internalHandler.setDelegate(h) } }) + globalErrorHandler.Store(holder{eh: h}) } -// Handle is a convience function for ErrorHandler().Handle(err) +// Handle is a convenience function for ErrorHandler().Handle(err) func Handle(err error) { GetErrorHandler().Handle(err) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index 6b5c0c2d9..87d0af405 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -12,327 +12,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package baggage provides types and functions to manage W3C Baggage. +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ package baggage -import ( - "context" +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item - "go.opentelemetry.io/otel/label" -) - -type rawMap map[label.Key]label.Value -type keySet map[label.Key]struct{} - -// Map is an immutable storage for correlations. -type Map struct { - m rawMap -} - -// MapUpdate contains information about correlation changes to be -// made. -type MapUpdate struct { - // DropSingleK contains a single key to be dropped from - // correlations. 
Use this to avoid an overhead of a slice - // allocation if there is only one key to drop. - DropSingleK label.Key - // DropMultiK contains all the keys to be dropped from - // correlations. - DropMultiK []label.Key - - // SingleKV contains a single key-value pair to be added to - // correlations. Use this to avoid an overhead of a slice - // allocation if there is only one key-value pair to add. - SingleKV label.KeyValue - // MultiKV contains all the key-value pairs to be added to - // correlations. - MultiKV []label.KeyValue -} - -func newMap(raw rawMap) Map { - return Map{ - m: raw, - } -} - -// NewEmptyMap creates an empty correlations map. -func NewEmptyMap() Map { - return newMap(nil) -} - -// NewMap creates a map with the contents of the update applied. In -// this function, having an update with DropSingleK or DropMultiK -// makes no sense - those fields are effectively ignored. -func NewMap(update MapUpdate) Map { - return NewEmptyMap().Apply(update) -} - -// Apply creates a copy of the map with the contents of the update -// applied. Apply will first drop the keys from DropSingleK and -// DropMultiK, then add key-value pairs from SingleKV and MultiKV. -func (m Map) Apply(update MapUpdate) Map { - delSet, addSet := getModificationSets(update) - mapSize := getNewMapSize(m.m, delSet, addSet) - - r := make(rawMap, mapSize) - for k, v := range m.m { - // do not copy items we want to drop - if _, ok := delSet[k]; ok { - continue - } - // do not copy items we would overwrite - if _, ok := addSet[k]; ok { - continue - } - r[k] = v - } - if update.SingleKV.Key.Defined() { - r[update.SingleKV.Key] = update.SingleKV.Value - } - for _, kv := range update.MultiKV { - r[kv.Key] = kv.Value - } - if len(r) == 0 { - r = nil - } - return newMap(r) -} - -func getModificationSets(update MapUpdate) (delSet, addSet keySet) { - deletionsCount := len(update.DropMultiK) - if update.DropSingleK.Defined() { - deletionsCount++ - } - if deletionsCount > 0 { - delSet = make(map[label.Key]struct{}, deletionsCount) - for _, k := range update.DropMultiK { - delSet[k] = struct{}{} - } - if update.DropSingleK.Defined() { - delSet[update.DropSingleK] = struct{}{} - } - } - - additionsCount := len(update.MultiKV) - if update.SingleKV.Key.Defined() { - additionsCount++ - } - if additionsCount > 0 { - addSet = make(map[label.Key]struct{}, additionsCount) - for _, k := range update.MultiKV { - addSet[k.Key] = struct{}{} - } - if update.SingleKV.Key.Defined() { - addSet[update.SingleKV.Key] = struct{}{} - } - } - - return -} - -func getNewMapSize(m rawMap, delSet, addSet keySet) int { - mapSizeDiff := 0 - for k := range addSet { - if _, ok := m[k]; !ok { - mapSizeDiff++ - } - } - for k := range delSet { - if _, ok := m[k]; ok { - if _, inAddSet := addSet[k]; !inAddSet { - mapSizeDiff-- - } - } - } - return len(m) + mapSizeDiff -} - -// Value gets a value from correlations map and returns a boolean -// value indicating whether the key exist in the map. -func (m Map) Value(k label.Key) (label.Value, bool) { - value, ok := m.m[k] - return value, ok -} - -// HasValue returns a boolean value indicating whether the key exist -// in the map. -func (m Map) HasValue(k label.Key) bool { - _, has := m.Value(k) - return has -} - -// Len returns a length of the map. -func (m Map) Len() int { - return len(m.m) -} - -// Foreach calls a passed callback once on each key-value pair until -// all the key-value pairs of the map were iterated or the callback -// returns false, whichever happens first. 
-func (m Map) Foreach(f func(label.KeyValue) bool) { - for k, v := range m.m { - if !f(label.KeyValue{ - Key: k, - Value: v, - }) { - return - } - } -} - -type correlationsType struct{} - -// SetHookFunc describes a type of a callback that is called when -// storing baggage in the context. -type SetHookFunc func(context.Context) context.Context - -// GetHookFunc describes a type of a callback that is called when -// getting baggage from the context. -type GetHookFunc func(context.Context, Map) Map - -// value under this key is either of type Map or correlationsData -var correlationsKey = &correlationsType{} - -type correlationsData struct { - m Map - setHook SetHookFunc - getHook GetHookFunc -} - -func (d correlationsData) isHookless() bool { - return d.setHook == nil && d.getHook == nil -} - -type hookKind int - -const ( - hookKindSet hookKind = iota - hookKindGet -) - -func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) { - switch kind { - case hookKindSet: - d.setHook = setHook - case hookKindGet: - d.getHook = getHook - } -} - -// ContextWithSetHook installs a hook function that will be invoked -// every time ContextWithMap is called. To avoid unnecessary callback -// invocations (recursive or not), the callback can temporarily clear -// the hooks from the context with the ContextWithNoHooks function. -// -// Note that NewContext also calls ContextWithMap, so the hook will be -// invoked. -// -// Passing nil SetHookFunc creates a context with no set hook to call. -// -// This function should not be used by applications or libraries. It -// is mostly for interoperation with other observability APIs. -func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context { - return contextWithHook(ctx, hookKindSet, hook, nil) +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property } -// ContextWithGetHook installs a hook function that will be invoked -// every time MapFromContext is called. To avoid unnecessary callback -// invocations (recursive or not), the callback can temporarily clear -// the hooks from the context with the ContextWithNoHooks function. -// -// Note that NewContext also calls MapFromContext, so the hook will be -// invoked. -// -// Passing nil GetHookFunc creates a context with no get hook to call. -// -// This function should not be used by applications or libraries. It -// is mostly for interoperation with other observability APIs. 
-func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context { - return contextWithHook(ctx, hookKindGet, nil, hook) -} - -func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context { - switch v := ctx.Value(correlationsKey).(type) { - case correlationsData: - v.overrideHook(kind, setHook, getHook) - if v.isHookless() { - return context.WithValue(ctx, correlationsKey, v.m) - } - return context.WithValue(ctx, correlationsKey, v) - case Map: - return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v) - default: - m := NewEmptyMap() - return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m) - } -} - -func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context { - d := correlationsData{m: m} - d.overrideHook(kind, setHook, getHook) - if d.isHookless() { - return ctx - } - return context.WithValue(ctx, correlationsKey, d) -} - -// ContextWithNoHooks creates a context with all the hooks -// disabled. Also returns old set and get hooks. This function can be -// used to temporarily clear the context from hooks and then reinstate -// them by calling ContextWithSetHook and ContextWithGetHook functions -// passing the hooks returned by this function. -// -// This function should not be used by applications or libraries. It -// is mostly for interoperation with other observability APIs. -func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) { - switch v := ctx.Value(correlationsKey).(type) { - case correlationsData: - return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook - default: - return ctx, nil, nil - } -} - -// ContextWithMap returns a context with the Map entered into it. -func ContextWithMap(ctx context.Context, m Map) context.Context { - switch v := ctx.Value(correlationsKey).(type) { - case correlationsData: - v.m = m - ctx = context.WithValue(ctx, correlationsKey, v) - if v.setHook != nil { - ctx = v.setHook(ctx) - } - return ctx - default: - return context.WithValue(ctx, correlationsKey, m) - } -} - -// ContextWithNoCorrelationData returns a context stripped of correlation -// data. -func ContextWithNoCorrelationData(ctx context.Context) context.Context { - return context.WithValue(ctx, correlationsKey, nil) -} - -// NewContext returns a context with the map from passed context -// updated with the passed key-value pairs. -func NewContext(ctx context.Context, keyvalues ...label.KeyValue) context.Context { - return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{ - MultiKV: keyvalues, - })) -} +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string -// MapFromContext gets the current Map from a Context. -func MapFromContext(ctx context.Context) Map { - switch v := ctx.Value(correlationsKey).(type) { - case correlationsData: - if v.getHook != nil { - return v.getHook(ctx, v.m) - } - return v.m - case Map: - return v - default: - return NewEmptyMap() - } + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. 
+ HasValue bool } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..7d8694e8c --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + switch v := parent.Value(baggageKey).(type) { + case baggageState: + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + switch v := parent.Value(baggageKey).(type) { + case baggageState: + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + switch v := parent.Value(baggageKey).(type) { + case baggageState: + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. 
+func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/meter.go deleted file mode 100644 index 8b288df78..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global - -import ( - "context" - "sync" - "sync/atomic" - "unsafe" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/registry" -) - -// This file contains the forwarding implementation of MeterProvider used as -// the default global instance. Metric events using instruments provided by -// this implementation are no-ops until the first Meter implementation is set -// as the global provider. -// -// The implementation here uses Mutexes to maintain a list of active Meters in -// the MeterProvider and Instruments in each Meter, under the assumption that -// these interfaces are not performance-critical. -// -// We have the invariant that setDelegate() will be called before a new -// MeterProvider implementation is registered as the global provider. Mutexes -// in the MeterProvider and Meters ensure that each instrument has a delegate -// before the global provider is set. -// -// Bound instrument operations are implemented by delegating to the -// instrument after it is registered, with a sync.Once initializer to -// protect against races with Release(). -// -// Metric uniqueness checking is implemented by calling the exported -// methods of the api/metric/registry package. - -type meterKey struct { - Name, Version string -} - -type meterProvider struct { - delegate metric.MeterProvider - - // lock protects `delegate` and `meters`. - lock sync.Mutex - - // meters maintains a unique entry for every named Meter - // that has been registered through the global instance. - meters map[meterKey]*meterEntry -} - -type meterImpl struct { - delegate unsafe.Pointer // (*metric.MeterImpl) - - lock sync.Mutex - syncInsts []*syncImpl - asyncInsts []*asyncImpl -} - -type meterEntry struct { - unique metric.MeterImpl - impl meterImpl -} - -type instrument struct { - descriptor metric.Descriptor -} - -type syncImpl struct { - delegate unsafe.Pointer // (*metric.SyncImpl) - - instrument -} - -type asyncImpl struct { - delegate unsafe.Pointer // (*metric.AsyncImpl) - - instrument - - runner metric.AsyncRunner -} - -// SyncImpler is implemented by all of the sync metric -// instruments. -type SyncImpler interface { - SyncImpl() metric.SyncImpl -} - -// AsyncImpler is implemented by all of the async -// metric instruments. 
-type AsyncImpler interface { - AsyncImpl() metric.AsyncImpl -} - -type syncHandle struct { - delegate unsafe.Pointer // (*metric.BoundInstrumentImpl) - - inst *syncImpl - labels []label.KeyValue - - initialize sync.Once -} - -var _ metric.MeterProvider = &meterProvider{} -var _ metric.MeterImpl = &meterImpl{} -var _ metric.InstrumentImpl = &syncImpl{} -var _ metric.BoundSyncImpl = &syncHandle{} -var _ metric.AsyncImpl = &asyncImpl{} - -func (inst *instrument) Descriptor() metric.Descriptor { - return inst.descriptor -} - -// MeterProvider interface and delegation - -func newMeterProvider() *meterProvider { - return &meterProvider{ - meters: map[meterKey]*meterEntry{}, - } -} - -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.lock.Lock() - defer p.lock.Unlock() - - p.delegate = provider - for key, entry := range p.meters { - entry.impl.setDelegate(key.Name, key.Version, provider) - } - p.meters = nil -} - -func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - p.lock.Lock() - defer p.lock.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(instrumentationName, opts...) - } - - key := meterKey{ - Name: instrumentationName, - Version: metric.NewMeterConfig(opts...).InstrumentationVersion, - } - entry, ok := p.meters[key] - if !ok { - entry = &meterEntry{} - entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl) - p.meters[key] = entry - - } - return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version)) -} - -// Meter interface and delegation - -func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) { - m.lock.Lock() - defer m.lock.Unlock() - - d := new(metric.MeterImpl) - *d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl() - m.delegate = unsafe.Pointer(d) - - for _, inst := range m.syncInsts { - inst.setDelegate(*d) - } - m.syncInsts = nil - for _, obs := range m.asyncInsts { - obs.setDelegate(*d) - } - m.asyncInsts = nil -} - -func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) { - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewSyncInstrument(desc) - } - - inst := &syncImpl{ - instrument: instrument{ - descriptor: desc, - }, - } - m.syncInsts = append(m.syncInsts, inst) - return inst, nil -} - -// Synchronous delegation - -func (inst *syncImpl) setDelegate(d metric.MeterImpl) { - implPtr := new(metric.SyncImpl) - - var err error - *implPtr, err = d.NewSyncInstrument(inst.descriptor) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. 
- panic(err) - } - - atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr)) -} - -func (inst *syncImpl) Implementation() interface{} { - if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return inst -} - -func (inst *syncImpl) Bind(labels []label.KeyValue) metric.BoundSyncImpl { - if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { - return (*implPtr).Bind(labels) - } - return &syncHandle{ - inst: inst, - labels: labels, - } -} - -func (bound *syncHandle) Unbind() { - bound.initialize.Do(func() {}) - - implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate)) - - if implPtr == nil { - return - } - - (*implPtr).Unbind() -} - -// Async delegation - -func (m *meterImpl) NewAsyncInstrument( - desc metric.Descriptor, - runner metric.AsyncRunner, -) (metric.AsyncImpl, error) { - - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewAsyncInstrument(desc, runner) - } - - inst := &asyncImpl{ - instrument: instrument{ - descriptor: desc, - }, - runner: runner, - } - m.asyncInsts = append(m.asyncInsts, inst) - return inst, nil -} - -func (obs *asyncImpl) Implementation() interface{} { - if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return obs -} - -func (obs *asyncImpl) setDelegate(d metric.MeterImpl) { - implPtr := new(metric.AsyncImpl) - - var err error - *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. - panic(err) - } - - atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr)) -} - -// Metric updates - -func (m *meterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, measurements ...metric.Measurement) { - if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { - (*delegatePtr).RecordBatch(ctx, labels, measurements...) - } -} - -func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []label.KeyValue) { - if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { - (*instPtr).RecordOne(ctx, number, labels) - } -} - -// Bound instrument initialization - -func (bound *syncHandle) RecordOne(ctx context.Context, number number.Number) { - instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate)) - if instPtr == nil { - return - } - var implPtr *metric.BoundSyncImpl - bound.initialize.Do(func() { - implPtr = new(metric.BoundSyncImpl) - *implPtr = (*instPtr).Bind(bound.labels) - atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr)) - }) - if implPtr == nil { - implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate)) - } - // This may still be nil if instrument was created and bound - // without a delegate, then the instrument was set to have a - // delegate and unbound. 
- if implPtr == nil { - return - } - (*implPtr).RecordOne(ctx, number) -} - -func AtomicFieldOffsets() map[string]uintptr { - return map[string]uintptr{ - "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate), - "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate), - "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate), - "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate), - "syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate), - } -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/state.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/state.go index f3bf00351..c55345576 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -18,7 +18,6 @@ import ( "sync" "sync/atomic" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -28,10 +27,6 @@ type ( tp trace.TracerProvider } - meterProviderHolder struct { - mp metric.MeterProvider - } - propagatorsHolder struct { tm propagation.TextMapPropagator } @@ -39,10 +34,8 @@ type ( var ( globalTracer = defaultTracerValue() - globalMeter = defaultMeterValue() globalPropagators = defaultPropagatorsValue() - delegateMeterOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once ) @@ -69,28 +62,6 @@ func SetTracerProvider(tp trace.TracerProvider) { globalTracer.Store(tracerProviderHolder{tp: tp}) } -// MeterProvider is the internal implementation for global.MeterProvider. -func MeterProvider() metric.MeterProvider { - return globalMeter.Load().(meterProviderHolder).mp -} - -// SetMeterProvider is the internal implementation for global.SetMeterProvider. -func SetMeterProvider(mp metric.MeterProvider) { - delegateMeterOnce.Do(func() { - current := MeterProvider() - - if current == mp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid MeterProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*meterProvider); ok { - def.setDelegate(mp) - } - }) - globalMeter.Store(meterProviderHolder{mp: mp}) -} - // TextMapPropagator is the internal implementation for global.TextMapPropagator. func TextMapPropagator() propagation.TextMapPropagator { return globalPropagators.Load().(propagatorsHolder).tm @@ -120,12 +91,6 @@ func defaultTracerValue() *atomic.Value { return v } -func defaultMeterValue() *atomic.Value { - v := &atomic.Value{} - v.Store(meterProviderHolder{mp: newMeterProvider()}) - return v -} - func defaultPropagatorsValue() *atomic.Value { v := &atomic.Value{} v.Store(propagatorsHolder{tm: newTextMapPropagator()}) @@ -135,9 +100,7 @@ func defaultPropagatorsValue() *atomic.Value { // ResetForTest restores the initial global state, for testing purposes. 
func ResetForTest() { globalTracer = defaultTracerValue() - globalMeter = defaultMeterValue() globalPropagators = defaultPropagatorsValue() - delegateMeterOnce = sync.Once{} delegateTraceOnce = sync.Once{} delegateTextMapPropagatorOnce = sync.Once{} } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 7b6993153..11c7c5568 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -34,8 +34,10 @@ SDK prior to any Tracer creation. import ( "context" "sync" + "sync/atomic" - "go.opentelemetry.io/otel/internal/trace/noop" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" ) @@ -44,9 +46,8 @@ import ( // All TracerProvider functionality is forwarded to a delegate once // configured. type tracerProvider struct { - mtx sync.Mutex - tracers []*tracer - + mtx sync.Mutex + tracers map[il]*tracer delegate trace.TracerProvider } @@ -60,17 +61,17 @@ var _ trace.TracerProvider = &tracerProvider{} // All Tracers provided prior to this function call are switched out to be // Tracers provided by provider. // -// Delegation only happens on the first call to this method. All subsequent -// calls result in no delegation changes. +// It is guaranteed by the caller that this happens only once. func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { - if p.delegate != nil { - return - } - p.mtx.Lock() defer p.mtx.Unlock() p.delegate = provider + + if len(p.tracers) == 0 { + return + } + for _, t := range p.tracers { t.setDelegate(provider) } @@ -87,21 +88,42 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return p.delegate.Tracer(name, opts...) } - t := &tracer{name: name, opts: opts} - p.tracers = append(p.tracers, t) + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t return t } +type il struct { + name string + version string +} + // tracer is a placeholder for a trace.Tracer. // // All Tracer functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopTracer. type tracer struct { - once sync.Once - name string - opts []trace.TracerOption + name string + opts []trace.TracerOption + provider *tracerProvider - delegate trace.Tracer + delegate atomic.Value } // Compile-time guarantee that tracer implements the trace.Tracer interface. @@ -112,17 +134,59 @@ var _ trace.Tracer = &tracer{} // // All subsequent calls to the Tracer methods will be passed to the delegate. // -// Delegation only happens on the first call to this method. All subsequent -// calls result in no delegation changes. +// It is guaranteed by the caller that this happens only once. func (t *tracer) setDelegate(provider trace.TracerProvider) { - t.once.Do(func() { t.delegate = provider.Tracer(t.name, t.opts...) }) + t.delegate.Store(provider.Tracer(t.name, t.opts...)) } // Start implements trace.Tracer by forwarding the call to t.delegate if // set, otherwise it forwards the call to a NoopTracer. 
-func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) { - if t.delegate != nil { - return t.delegate.Start(ctx, name, opts...) +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) } - return noop.Tracer.Start(ctx, name, opts...) + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + sc trace.SpanContext + tracer *tracer } + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index dae825ed8..0d806b1c8 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -38,14 +38,6 @@ func RawToInt64(r uint64) int64 { return int64(r) } -func Uint64ToRaw(u uint64) uint64 { - return u -} - -func RawToUint64(r uint64) uint64 { - return r -} - func Float64ToRaw(f float64) uint64 { return math.Float64bits(f) } @@ -54,30 +46,6 @@ func RawToFloat64(r uint64) float64 { return math.Float64frombits(r) } -func Int32ToRaw(i int32) uint64 { - return uint64(i) -} - -func RawToInt32(r uint64) int32 { - return int32(r) -} - -func Uint32ToRaw(u uint32) uint64 { - return uint64(u) -} - -func RawToUint32(r uint64) uint32 { - return uint32(r) -} - -func Float32ToRaw(f float32) uint64 { - return Uint32ToRaw(math.Float32bits(f)) -} - -func RawToFloat32(r uint64) float32 { - return math.Float32frombits(RawToUint32(r)) -} - func RawPtrToFloat64Ptr(r *uint64) *float64 { return (*float64)(unsafe.Pointer(r)) } @@ -85,7 +53,3 @@ func RawPtrToFloat64Ptr(r *uint64) *float64 { func RawPtrToInt64Ptr(r *uint64) *int64 { return (*int64)(unsafe.Pointer(r)) } - -func RawPtrToUint64Ptr(r *uint64) *uint64 { - return r -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/trace/parent/parent.go b/src/runtime/vendor/go.opentelemetry.io/otel/internal/trace/parent/parent.go deleted file mode 100644 index 052926510..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/trace/parent/parent.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright The OpenTelemetry 
Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parent - -import ( - "context" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/trace" -) - -func GetSpanContextAndLinks(ctx context.Context, ignoreContext bool) (trace.SpanContext, bool, []trace.Link) { - lsctx := trace.SpanContextFromContext(ctx) - rsctx := trace.RemoteSpanContextFromContext(ctx) - - if ignoreContext { - links := addLinkIfValid(nil, lsctx, "current") - links = addLinkIfValid(links, rsctx, "remote") - - return trace.SpanContext{}, false, links - } - if lsctx.IsValid() { - return lsctx, false, nil - } - if rsctx.IsValid() { - return rsctx, true, nil - } - return trace.SpanContext{}, false, nil -} - -func addLinkIfValid(links []trace.Link, sc trace.SpanContext, kind string) []trace.Link { - if !sc.IsValid() { - return links - } - return append(links, trace.Link{ - SpanContext: sc, - Attributes: []label.KeyValue{ - label.String("ignored-on-demand", kind), - }, - }) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/key.go b/src/runtime/vendor/go.opentelemetry.io/otel/label/key.go deleted file mode 100644 index 732fa2523..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/key.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package label // import "go.opentelemetry.io/otel/label" - -// Key represents the key part in key-value pairs. It's a string. The -// allowed character set in the key depends on the use of the key. -type Key string - -// Bool creates a KeyValue instance with a BOOL Value. -// -// If creating both key and a bool value at the same time, then -// instead of calling Key(name).Bool(value) consider using a -// convenience function provided by the api/key package - -// key.Bool(name, value). -func (k Key) Bool(v bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolValue(v), - } -} - -// Int64 creates a KeyValue instance with an INT64 Value. -// -// If creating both key and an int64 value at the same time, then -// instead of calling Key(name).Int64(value) consider using a -// convenience function provided by the api/key package - -// key.Int64(name, value). -func (k Key) Int64(v int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64Value(v), - } -} - -// Uint64 creates a KeyValue instance with a UINT64 Value. 
-// -// If creating both key and a uint64 value at the same time, then -// instead of calling Key(name).Uint64(value) consider using a -// convenience function provided by the api/key package - -// key.Uint64(name, value). -func (k Key) Uint64(v uint64) KeyValue { - return KeyValue{ - Key: k, - Value: Uint64Value(v), - } -} - -// Float64 creates a KeyValue instance with a FLOAT64 Value. -// -// If creating both key and a float64 value at the same time, then -// instead of calling Key(name).Float64(value) consider using a -// convenience function provided by the api/key package - -// key.Float64(name, value). -func (k Key) Float64(v float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64Value(v), - } -} - -// Int32 creates a KeyValue instance with an INT32 Value. -// -// If creating both key and an int32 value at the same time, then -// instead of calling Key(name).Int32(value) consider using a -// convenience function provided by the api/key package - -// key.Int32(name, value). -func (k Key) Int32(v int32) KeyValue { - return KeyValue{ - Key: k, - Value: Int32Value(v), - } -} - -// Uint32 creates a KeyValue instance with a UINT32 Value. -// -// If creating both key and a uint32 value at the same time, then -// instead of calling Key(name).Uint32(value) consider using a -// convenience function provided by the api/key package - -// key.Uint32(name, value). -func (k Key) Uint32(v uint32) KeyValue { - return KeyValue{ - Key: k, - Value: Uint32Value(v), - } -} - -// Float32 creates a KeyValue instance with a FLOAT32 Value. -// -// If creating both key and a float32 value at the same time, then -// instead of calling Key(name).Float32(value) consider using a -// convenience function provided by the api/key package - -// key.Float32(name, value). -func (k Key) Float32(v float32) KeyValue { - return KeyValue{ - Key: k, - Value: Float32Value(v), - } -} - -// String creates a KeyValue instance with a STRING Value. -// -// If creating both key and a string value at the same time, then -// instead of calling Key(name).String(value) consider using a -// convenience function provided by the api/key package - -// key.String(name, value). -func (k Key) String(v string) KeyValue { - return KeyValue{ - Key: k, - Value: StringValue(v), - } -} - -// Int creates a KeyValue instance with either an INT32 or an INT64 -// Value, depending on whether the int type is 32 or 64 bits wide. -// -// If creating both key and an int value at the same time, then -// instead of calling Key(name).Int(value) consider using a -// convenience function provided by the api/key package - -// key.Int(name, value). -func (k Key) Int(v int) KeyValue { - return KeyValue{ - Key: k, - Value: IntValue(v), - } -} - -// Uint creates a KeyValue instance with either a UINT32 or a UINT64 -// Value, depending on whether the uint type is 32 or 64 bits wide. -// -// If creating both key and a uint value at the same time, then -// instead of calling Key(name).Uint(value) consider using a -// convenience function provided by the api/key package - -// key.Uint(name, value). -func (k Key) Uint(v uint) KeyValue { - return KeyValue{ - Key: k, - Value: UintValue(v), - } -} - -// Defined returns true for non-empty keys. -func (k Key) Defined() bool { - return len(k) != 0 -} - -// Array creates a KeyValue instance with a ARRAY Value. 
-// -// If creating both key and a array value at the same time, then -// instead of calling Key(name).String(value) consider using a -// convenience function provided by the api/key package - -// key.Array(name, value). -func (k Key) Array(v interface{}) KeyValue { - return KeyValue{ - Key: k, - Value: ArrayValue(v), - } -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/kv.go b/src/runtime/vendor/go.opentelemetry.io/otel/label/kv.go deleted file mode 100644 index 0520f4f41..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/kv.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package label // import "go.opentelemetry.io/otel/label" - -import ( - "encoding/json" - "fmt" - "reflect" -) - -// KeyValue holds a key and value pair. -type KeyValue struct { - Key Key - Value Value -} - -// Bool creates a new key-value pair with a passed name and a bool -// value. -func Bool(k string, v bool) KeyValue { - return Key(k).Bool(v) -} - -// Int64 creates a new key-value pair with a passed name and an int64 -// value. -func Int64(k string, v int64) KeyValue { - return Key(k).Int64(v) -} - -// Uint64 creates a new key-value pair with a passed name and a uint64 -// value. -func Uint64(k string, v uint64) KeyValue { - return Key(k).Uint64(v) -} - -// Float64 creates a new key-value pair with a passed name and a float64 -// value. -func Float64(k string, v float64) KeyValue { - return Key(k).Float64(v) -} - -// Int32 creates a new key-value pair with a passed name and an int32 -// value. -func Int32(k string, v int32) KeyValue { - return Key(k).Int32(v) -} - -// Uint32 creates a new key-value pair with a passed name and a uint32 -// value. -func Uint32(k string, v uint32) KeyValue { - return Key(k).Uint32(v) -} - -// Float32 creates a new key-value pair with a passed name and a float32 -// value. -func Float32(k string, v float32) KeyValue { - return Key(k).Float32(v) -} - -// String creates a new key-value pair with a passed name and a string -// value. -func String(k, v string) KeyValue { - return Key(k).String(v) -} - -// Stringer creates a new key-value pair with a passed name and a string -// value generated by the passed Stringer interface. -func Stringer(k string, v fmt.Stringer) KeyValue { - return Key(k).String(v.String()) -} - -// Int creates a new key-value pair instance with a passed name and -// either an int32 or an int64 value, depending on whether the int -// type is 32 or 64 bits wide. -func Int(k string, v int) KeyValue { - return Key(k).Int(v) -} - -// Uint creates a new key-value pair instance with a passed name and -// either an uint32 or an uint64 value, depending on whether the uint -// type is 32 or 64 bits wide. -func Uint(k string, v uint) KeyValue { - return Key(k).Uint(v) -} - -// Array creates a new key-value pair with a passed name and a array. -// Only arrays of primitive type are supported. 
-func Array(k string, v interface{}) KeyValue { - return Key(k).Array(v) -} - -// Any creates a new key-value pair instance with a passed name and -// automatic type inference. This is slower, and not type-safe. -func Any(k string, value interface{}) KeyValue { - if value == nil { - return String(k, "") - } - - if stringer, ok := value.(fmt.Stringer); ok { - return String(k, stringer.String()) - } - - rv := reflect.ValueOf(value) - - switch rv.Kind() { - case reflect.Array, reflect.Slice: - return Array(k, value) - case reflect.Bool: - return Bool(k, rv.Bool()) - case reflect.Int, reflect.Int8, reflect.Int16: - return Int(k, int(rv.Int())) - case reflect.Int32: - return Int32(k, int32(rv.Int())) - case reflect.Int64: - return Int64(k, int64(rv.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16: - return Uint(k, uint(rv.Uint())) - case reflect.Uint32: - return Uint32(k, uint32(rv.Uint())) - case reflect.Uint64, reflect.Uintptr: - return Uint64(k, rv.Uint()) - case reflect.Float32: - return Float32(k, float32(rv.Float())) - case reflect.Float64: - return Float64(k, rv.Float()) - case reflect.String: - return String(k, rv.String()) - } - if b, err := json.Marshal(value); b != nil && err == nil { - return String(k, string(b)) - } - return String(k, fmt.Sprint(value)) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/value.go b/src/runtime/vendor/go.opentelemetry.io/otel/label/value.go deleted file mode 100644 index 6cc0bee80..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/value.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package label // import "go.opentelemetry.io/otel/label" - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "unsafe" - - "go.opentelemetry.io/otel/internal" -) - -//go:generate stringer -type=Type - -// Type describes the type of the data Value holds. -type Type int - -// Value represents the value part in key-value pairs. -type Value struct { - vtype Type - numeric uint64 - stringly string - // TODO Lazy value type? - - array interface{} -} - -const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota - // BOOL is a boolean Type Value. - BOOL - // INT32 is a 32-bit signed integral Type Value. - INT32 - // INT64 is a 64-bit signed integral Type Value. - INT64 - // UINT32 is a 32-bit unsigned integral Type Value. - UINT32 - // UINT64 is a 64-bit unsigned integral Type Value. - UINT64 - // FLOAT32 is a 32-bit floating point Type Value. - FLOAT32 - // FLOAT64 is a 64-bit floating point Type Value. - FLOAT64 - // STRING is a string Type Value. - STRING - // ARRAY is an array Type Value used to store 1-dimensional slices or - // arrays of bool, int, int32, int64, uint, uint32, uint64, float, - // float32, float64, or string types. - ARRAY -) - -// BoolValue creates a BOOL Value. 
-func BoolValue(v bool) Value { - return Value{ - vtype: BOOL, - numeric: internal.BoolToRaw(v), - } -} - -// Int64Value creates an INT64 Value. -func Int64Value(v int64) Value { - return Value{ - vtype: INT64, - numeric: internal.Int64ToRaw(v), - } -} - -// Uint64Value creates a UINT64 Value. -func Uint64Value(v uint64) Value { - return Value{ - vtype: UINT64, - numeric: internal.Uint64ToRaw(v), - } -} - -// Float64Value creates a FLOAT64 Value. -func Float64Value(v float64) Value { - return Value{ - vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), - } -} - -// Int32Value creates an INT32 Value. -func Int32Value(v int32) Value { - return Value{ - vtype: INT32, - numeric: internal.Int32ToRaw(v), - } -} - -// Uint32Value creates a UINT32 Value. -func Uint32Value(v uint32) Value { - return Value{ - vtype: UINT32, - numeric: internal.Uint32ToRaw(v), - } -} - -// Float32Value creates a FLOAT32 Value. -func Float32Value(v float32) Value { - return Value{ - vtype: FLOAT32, - numeric: internal.Float32ToRaw(v), - } -} - -// StringValue creates a STRING Value. -func StringValue(v string) Value { - return Value{ - vtype: STRING, - stringly: v, - } -} - -// IntValue creates either an INT32 or an INT64 Value, depending on whether -// the int type is 32 or 64 bits wide. -func IntValue(v int) Value { - if unsafe.Sizeof(v) == 4 { - return Int32Value(int32(v)) - } - return Int64Value(int64(v)) -} - -// UintValue creates either a UINT32 or a UINT64 Value, depending on whether -// the uint type is 32 or 64 bits wide. -func UintValue(v uint) Value { - if unsafe.Sizeof(v) == 4 { - return Uint32Value(uint32(v)) - } - return Uint64Value(uint64(v)) -} - -// ArrayValue creates an ARRAY value from an array or slice. -// Only arrays or slices of bool, int, int32, int64, uint, uint32, uint64, -// float, float32, float64, or string types are allowed. Specifically, arrays -// and slices can not contain other arrays, slices, structs, or non-standard -// types. If the passed value is not an array or slice of these types an -// INVALID value is returned. -func ArrayValue(v interface{}) Value { - switch reflect.TypeOf(v).Kind() { - case reflect.Array, reflect.Slice: - // get array type regardless of dimensions - typ := reflect.TypeOf(v).Elem() - kind := typ.Kind() - switch kind { - case reflect.Bool, reflect.Int, reflect.Int32, reflect.Int64, - reflect.Float32, reflect.Float64, reflect.String, - reflect.Uint, reflect.Uint32, reflect.Uint64: - val := reflect.ValueOf(v) - length := val.Len() - frozen := reflect.Indirect(reflect.New(reflect.ArrayOf(length, typ))) - reflect.Copy(frozen, val) - return Value{ - vtype: ARRAY, - array: frozen.Interface(), - } - default: - return Value{vtype: INVALID} - } - } - return Value{vtype: INVALID} -} - -// Type returns a type of the Value. -func (v Value) Type() Type { - return v.vtype -} - -// AsBool returns the bool value. Make sure that the Value's type is -// BOOL. -func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) -} - -// AsInt32 returns the int32 value. Make sure that the Value's type is -// INT32. -func (v Value) AsInt32() int32 { - return internal.RawToInt32(v.numeric) -} - -// AsInt64 returns the int64 value. Make sure that the Value's type is -// INT64. -func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) -} - -// AsUint32 returns the uint32 value. Make sure that the Value's type -// is UINT32. -func (v Value) AsUint32() uint32 { - return internal.RawToUint32(v.numeric) -} - -// AsUint64 returns the uint64 value. 
Make sure that the Value's type is -// UINT64. -func (v Value) AsUint64() uint64 { - return internal.RawToUint64(v.numeric) -} - -// AsFloat32 returns the float32 value. Make sure that the Value's -// type is FLOAT32. -func (v Value) AsFloat32() float32 { - return internal.RawToFloat32(v.numeric) -} - -// AsFloat64 returns the float64 value. Make sure that the Value's -// type is FLOAT64. -func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) -} - -// AsString returns the string value. Make sure that the Value's type -// is STRING. -func (v Value) AsString() string { - return v.stringly -} - -// AsArray returns the array Value as an interface{}. -func (v Value) AsArray() interface{} { - return v.array -} - -type unknownValueType struct{} - -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { - switch v.Type() { - case ARRAY: - return v.AsArray() - case BOOL: - return v.AsBool() - case INT32: - return v.AsInt32() - case INT64: - return v.AsInt64() - case UINT32: - return v.AsUint32() - case UINT64: - return v.AsUint64() - case FLOAT32: - return v.AsFloat32() - case FLOAT64: - return v.AsFloat64() - case STRING: - return v.stringly - } - return unknownValueType{} -} - -// Emit returns a string representation of Value's data. -func (v Value) Emit() string { - switch v.Type() { - case ARRAY: - return fmt.Sprint(v.array) - case BOOL: - return strconv.FormatBool(v.AsBool()) - case INT32: - return strconv.FormatInt(int64(v.AsInt32()), 10) - case INT64: - return strconv.FormatInt(v.AsInt64(), 10) - case UINT32: - return strconv.FormatUint(uint64(v.AsUint32()), 10) - case UINT64: - return strconv.FormatUint(v.AsUint64(), 10) - case FLOAT32: - return fmt.Sprint(v.AsFloat32()) - case FLOAT64: - return fmt.Sprint(v.AsFloat64()) - case STRING: - return v.stringly - default: - return "unknown" - } -} - -// MarshalJSON returns the JSON encoding of the Value. -func (v Value) MarshalJSON() ([]byte, error) { - var jsonVal struct { - Type string - Value interface{} - } - jsonVal.Type = v.Type().String() - jsonVal.Value = v.AsInterface() - return json.Marshal(jsonVal) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric.go deleted file mode 100644 index 6d285b911..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter creates an implementation of the Meter interface from the global -// MeterProvider. The instrumentationName must be the name of the library -// providing instrumentation. This name may be the same as the instrumented -// code only if that code provides built-in instrumentation. 
If the -// instrumentationName is empty, then a implementation defined default name -// will be used instead. -// -// This is short for MeterProvider().Meter(name) -func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(instrumentationName, opts...) -} - -// GetMeterProvider returns the registered global meter provider. If -// none is registered then a default meter provider is returned that -// forwards the Meter interface to the first registered Meter. -// -// Use the meter provider to create a named meter. E.g. -// meter := global.MeterProvider().Meter("example.com/foo") -// or -// meter := global.Meter("example.com/foo") -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers `mp` as the global meter provider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/config.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/config.go deleted file mode 100644 index 02f0ff8e0..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/config.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "go.opentelemetry.io/otel/unit" -) - -// InstrumentConfig contains options for metric instrument descriptors. -type InstrumentConfig struct { - // Description describes the instrument in human-readable terms. - Description string - // Unit describes the measurement unit for a instrument. - Unit unit.Unit - // InstrumentationName is the name of the library providing - // instrumentation. - InstrumentationName string - // InstrumentationVersion is the version of the library providing - // instrumentation. - InstrumentationVersion string -} - -// InstrumentOption is an interface for applying metric instrument options. -type InstrumentOption interface { - // ApplyMeter is used to set a InstrumentOption value of a - // InstrumentConfig. - ApplyInstrument(*InstrumentConfig) -} - -// NewInstrumentConfig creates a new InstrumentConfig -// and applies all the given options. -func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig { - var config InstrumentConfig - for _, o := range opts { - o.ApplyInstrument(&config) - } - return config -} - -// WithDescription applies provided description. -func WithDescription(desc string) InstrumentOption { - return descriptionOption(desc) -} - -type descriptionOption string - -func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) { - config.Description = string(d) -} - -// WithUnit applies provided unit. -func WithUnit(unit unit.Unit) InstrumentOption { - return unitOption(unit) -} - -type unitOption unit.Unit - -func (u unitOption) ApplyInstrument(config *InstrumentConfig) { - config.Unit = unit.Unit(u) -} - -// WithInstrumentationName sets the instrumentation name. 
-func WithInstrumentationName(name string) InstrumentOption { - return instrumentationNameOption(name) -} - -type instrumentationNameOption string - -func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) { - config.InstrumentationName = string(i) -} - -// MeterConfig contains options for Meters. -type MeterConfig struct { - // InstrumentationVersion is the version of the library providing - // instrumentation. - InstrumentationVersion string -} - -// MeterOption is an interface for applying Meter options. -type MeterOption interface { - // ApplyMeter is used to set a MeterOption value of a MeterConfig. - ApplyMeter(*MeterConfig) -} - -// NewMeterConfig creates a new MeterConfig and applies -// all the given options. -func NewMeterConfig(opts ...MeterOption) MeterConfig { - var config MeterConfig - for _, o := range opts { - o.ApplyMeter(&config) - } - return config -} - -// InstrumentationOption is an interface for applying instrumentation specific -// options. -type InstrumentationOption interface { - InstrumentOption - MeterOption -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) InstrumentationOption { - return instrumentationVersionOption(version) -} - -type instrumentationVersionOption string - -func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) { - config.InstrumentationVersion = string(i) -} - -func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) { - config.InstrumentationVersion = string(i) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/doc.go deleted file mode 100644 index 7889ff000..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/doc.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package metric provides an implementation of the metrics part of the -OpenTelemetry API. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - -Measurements can be made about an operation being performed or the state of a -system in general. These measurements can be crucial to the reliable operation -of code and provide valuable insights about the inner workings of a system. - -Measurements are made using instruments provided by this package. The type of -instrument used will depend on the type of measurement being made and of what -part of a system is being measured. - -Instruments are categorized as Synchronous or Asynchronous and independently -as Adding or Grouping. Synchronous instruments are called by the user with a -Context. Asynchronous instruments are called by the SDK during collection. -Additive instruments are semantically intended for capturing a sum. 
Grouping -instruments are intended for capturing a distribution. - -Additive instruments may be monotonic, in which case they are non-decreasing -and naturally define a rate. - -The synchronous instrument names are: - - Counter: additive, monotonic - UpDownCounter: additive - ValueRecorder: grouping - -and the asynchronous instruments are: - - SumObserver: additive, monotonic - UpDownSumObserver: additive - ValueObserver: grouping - -All instruments are provided with support for either float64 or int64 input -values. - -An instrument is created using a Meter. Additionally, a Meter is used to -record batches of synchronous measurements or asynchronous observations. A -Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to -the instrumentation it instruments and must be named and versioned when -created with a MeterProvider with the name and version of the instrumentation -library. - -Instrumentation should be designed to accept a MeterProvider from which it can -create its own unique Meter. Alternatively, the registered global -MeterProvider from the go.opentelemetry.io/otel package can be used as a -default. -*/ -package metric // import "go.opentelemetry.io/otel/metric" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/instrumentkind_string.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/instrumentkind_string.go deleted file mode 100644 index 2805e2250..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/instrumentkind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT. - -package metric - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ValueRecorderInstrumentKind-0] - _ = x[ValueObserverInstrumentKind-1] - _ = x[CounterInstrumentKind-2] - _ = x[UpDownCounterInstrumentKind-3] - _ = x[SumObserverInstrumentKind-4] - _ = x[UpDownSumObserverInstrumentKind-5] -} - -const _InstrumentKind_name = "ValueRecorderInstrumentKindValueObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindSumObserverInstrumentKindUpDownSumObserverInstrumentKind" - -var _InstrumentKind_index = [...]uint8{0, 27, 54, 75, 102, 127, 158} - -func (i InstrumentKind) String() string { - if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) { - return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric.go deleted file mode 100644 index 0b988abba..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/unit" -) - -// MeterProvider supports named Meter instances. -type MeterProvider interface { - // Meter creates an implementation of the Meter interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. - Meter(instrumentationName string, opts ...MeterOption) Meter -} - -// Meter is the creator of metric instruments. -// -// An uninitialized Meter is a no-op implementation. -type Meter struct { - impl MeterImpl - name, version string -} - -// RecordBatch atomically records a batch of measurements. -func (m Meter) RecordBatch(ctx context.Context, ls []label.KeyValue, ms ...Measurement) { - if m.impl == nil { - return - } - m.impl.RecordBatch(ctx, ls, ms...) -} - -// NewBatchObserver creates a new BatchObserver that supports -// making batches of observations for multiple instruments. -func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver { - return BatchObserver{ - meter: m, - runner: newBatchAsyncRunner(callback), - } -} - -// NewInt64Counter creates a new integer Counter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) { - return wrapInt64CounterInstrument( - m.newSync(name, CounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64Counter creates a new floating point Counter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) { - return wrapFloat64CounterInstrument( - m.newSync(name, CounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) { - return wrapInt64UpDownCounterInstrument( - m.newSync(name, UpDownCounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) { - return wrapFloat64UpDownCounterInstrument( - m.newSync(name, UpDownCounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). 
-func (m Meter) NewInt64ValueRecorder(name string, opts ...InstrumentOption) (Int64ValueRecorder, error) { - return wrapInt64ValueRecorderInstrument( - m.newSync(name, ValueRecorderInstrumentKind, number.Int64Kind, opts)) -} - -// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64ValueRecorder(name string, opts ...InstrumentOption) (Float64ValueRecorder, error) { - return wrapFloat64ValueRecorderInstrument( - m.newSync(name, ValueRecorderInstrumentKind, number.Float64Kind, opts)) -} - -// NewInt64ValueObserver creates a new integer ValueObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64ValueObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64ValueObserver, error) { - if callback == nil { - return wrapInt64ValueObserverInstrument(NoopAsync{}, nil) - } - return wrapInt64ValueObserverInstrument( - m.newAsync(name, ValueObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64ValueObserver creates a new floating point ValueObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64ValueObserver, error) { - if callback == nil { - return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil) - } - return wrapFloat64ValueObserverInstrument( - m.newAsync(name, ValueObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64SumObserver creates a new integer SumObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) { - if callback == nil { - return wrapInt64SumObserverInstrument(NoopAsync{}, nil) - } - return wrapInt64SumObserverInstrument( - m.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64SumObserver creates a new floating point SumObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64SumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64SumObserver, error) { - if callback == nil { - return wrapFloat64SumObserverInstrument(NoopAsync{}, nil) - } - return wrapFloat64SumObserverInstrument( - m.newAsync(name, SumObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument -// with the given name, running a given callback, and customized with -// options. 
May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) { - if callback == nil { - return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil) - } - return wrapInt64UpDownSumObserverInstrument( - m.newAsync(name, UpDownSumObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownSumObserver, error) { - if callback == nil { - return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil) - } - return wrapFloat64UpDownSumObserverInstrument( - m.newAsync(name, UpDownSumObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64ValueObserver creates a new integer ValueObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) { - if b.runner == nil { - return wrapInt64ValueObserverInstrument(NoopAsync{}, nil) - } - return wrapInt64ValueObserverInstrument( - b.meter.newAsync(name, ValueObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64ValueObserver creates a new floating point ValueObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64ValueObserver(name string, opts ...InstrumentOption) (Float64ValueObserver, error) { - if b.runner == nil { - return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil) - } - return wrapFloat64ValueObserverInstrument( - b.meter.newAsync(name, ValueObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64SumObserver creates a new integer SumObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) { - if b.runner == nil { - return wrapInt64SumObserverInstrument(NoopAsync{}, nil) - } - return wrapInt64SumObserverInstrument( - b.meter.newAsync(name, SumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64SumObserver creates a new floating point SumObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) { - if b.runner == nil { - return wrapFloat64SumObserverInstrument(NoopAsync{}, nil) - } - return wrapFloat64SumObserverInstrument( - b.meter.newAsync(name, SumObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) { - if b.runner == nil { - return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil) - } - return wrapInt64UpDownSumObserverInstrument( - b.meter.newAsync(name, UpDownSumObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64UpDownSumObserver(name string, opts ...InstrumentOption) (Float64UpDownSumObserver, error) { - if b.runner == nil { - return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil) - } - return wrapFloat64UpDownSumObserverInstrument( - b.meter.newAsync(name, UpDownSumObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// MeterImpl returns the underlying MeterImpl of this Meter. -func (m Meter) MeterImpl() MeterImpl { - return m.impl -} - -// newAsync constructs one new asynchronous instrument. -func (m Meter) newAsync( - name string, - mkind InstrumentKind, - nkind number.Kind, - opts []InstrumentOption, - runner AsyncRunner, -) ( - AsyncImpl, - error, -) { - if m.impl == nil { - return NoopAsync{}, nil - } - desc := NewDescriptor(name, mkind, nkind, opts...) - desc.config.InstrumentationName = m.name - desc.config.InstrumentationVersion = m.version - return m.impl.NewAsyncInstrument(desc, runner) -} - -// newSync constructs one new synchronous instrument. -func (m Meter) newSync( - name string, - metricKind InstrumentKind, - numberKind number.Kind, - opts []InstrumentOption, -) ( - SyncImpl, - error, -) { - if m.impl == nil { - return NoopSync{}, nil - } - desc := NewDescriptor(name, metricKind, numberKind, opts...) - desc.config.InstrumentationName = m.name - desc.config.InstrumentationVersion = m.version - return m.impl.NewSyncInstrument(desc) -} - -// MeterMust is a wrapper for Meter interfaces that panics when any -// instrument constructor encounters an error. -type MeterMust struct { - meter Meter -} - -// BatchObserverMust is a wrapper for BatchObserver that panics when -// any instrument constructor encounters an error. -type BatchObserverMust struct { - batch BatchObserver -} - -// Must constructs a MeterMust implementation from a Meter, allowing -// the application to panic when any instrument constructor yields an -// error. -func Must(meter Meter) MeterMust { - return MeterMust{meter: meter} -} - -// NewInt64Counter calls `Meter.NewInt64Counter` and returns the -// instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter { - if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter { - if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter { - if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter { - if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...InstrumentOption) Int64ValueRecorder { - if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...InstrumentOption) Float64ValueRecorder { - if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64ValueObserver calls `Meter.NewInt64ValueObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64ValueObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64ValueObserver { - if inst, err := mm.meter.NewInt64ValueObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64ValueObserver calls `Meter.NewFloat64ValueObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64ValueObserver { - if inst, err := mm.meter.NewFloat64ValueObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64SumObserver calls `Meter.NewInt64SumObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64SumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64SumObserver { - if inst, err := mm.meter.NewInt64SumObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64SumObserver calls `Meter.NewFloat64SumObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewFloat64SumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64SumObserver { - if inst, err := mm.meter.NewFloat64SumObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownSumObserver calls `Meter.NewInt64UpDownSumObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownSumObserver { - if inst, err := mm.meter.NewInt64UpDownSumObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownSumObserver calls `Meter.NewFloat64UpDownSumObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownSumObserver { - if inst, err := mm.meter.NewFloat64UpDownSumObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewBatchObserver returns a wrapper around BatchObserver that panics -// when any instrument constructor returns an error. -func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust { - return BatchObserverMust{ - batch: mm.meter.NewBatchObserver(callback), - } -} - -// NewInt64ValueObserver calls `BatchObserver.NewInt64ValueObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64ValueObserver(name string, oos ...InstrumentOption) Int64ValueObserver { - if inst, err := bm.batch.NewInt64ValueObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64ValueObserver calls `BatchObserver.NewFloat64ValueObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64ValueObserver(name string, oos ...InstrumentOption) Float64ValueObserver { - if inst, err := bm.batch.NewFloat64ValueObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64SumObserver calls `BatchObserver.NewInt64SumObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64SumObserver(name string, oos ...InstrumentOption) Int64SumObserver { - if inst, err := bm.batch.NewInt64SumObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64SumObserver calls `BatchObserver.NewFloat64SumObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64SumObserver(name string, oos ...InstrumentOption) Float64SumObserver { - if inst, err := bm.batch.NewFloat64SumObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownSumObserver calls `BatchObserver.NewInt64UpDownSumObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64UpDownSumObserver(name string, oos ...InstrumentOption) Int64UpDownSumObserver { - if inst, err := bm.batch.NewInt64UpDownSumObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownSumObserver calls `BatchObserver.NewFloat64UpDownSumObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (bm BatchObserverMust) NewFloat64UpDownSumObserver(name string, oos ...InstrumentOption) Float64UpDownSumObserver { - if inst, err := bm.batch.NewFloat64UpDownSumObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind InstrumentKind - numberKind number.Kind - config InstrumentConfig -} - -// NewDescriptor returns a Descriptor with the given contents. -func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, opts ...InstrumentOption) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - config: NewInstrumentConfig(opts...), - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. -func (d Descriptor) Description() string { - return d.config.Description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.config.Unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. -func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} - -// InstrumentationName returns the name of the library that provided -// instrumentation for this instrument. -func (d Descriptor) InstrumentationName() string { - return d.config.InstrumentationName -} - -// InstrumentationVersion returns the version of the library that provided -// instrumentation for this instrument. -func (d Descriptor) InstrumentationVersion() string { - return d.config.InstrumentationVersion -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go deleted file mode 100644 index 6e2f9ce95..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go +++ /dev/null @@ -1,777 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=InstrumentKind - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/metric/number" -) - -// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. -var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") - -// InstrumentKind describes the kind of instrument. -type InstrumentKind int8 - -const ( - // ValueRecorderInstrumentKind indicates a ValueRecorder instrument. 
- ValueRecorderInstrumentKind InstrumentKind = iota - // ValueObserverInstrumentKind indicates an ValueObserver instrument. - ValueObserverInstrumentKind - - // CounterInstrumentKind indicates a Counter instrument. - CounterInstrumentKind - // UpDownCounterInstrumentKind indicates a UpDownCounter instrument. - UpDownCounterInstrumentKind - - // SumObserverInstrumentKind indicates a SumObserver instrument. - SumObserverInstrumentKind - // UpDownSumObserverInstrumentKind indicates a UpDownSumObserver - // instrument. - UpDownSumObserverInstrumentKind -) - -// Synchronous returns whether this is a synchronous kind of instrument. -func (k InstrumentKind) Synchronous() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, ValueRecorderInstrumentKind: - return true - } - return false -} - -// Asynchronous returns whether this is an asynchronous kind of instrument. -func (k InstrumentKind) Asynchronous() bool { - return !k.Synchronous() -} - -// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping). -func (k InstrumentKind) Adding() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, SumObserverInstrumentKind, UpDownSumObserverInstrumentKind: - return true - } - return false -} - -// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding). -func (k InstrumentKind) Grouping() bool { - return !k.Adding() -} - -// Monotonic returns whether this kind of instrument exposes a non-decreasing sum. -func (k InstrumentKind) Monotonic() bool { - switch k { - case CounterInstrumentKind, SumObserverInstrumentKind: - return true - } - return false -} - -// PrecomputedSum returns whether this kind of instrument receives precomputed sums. -func (k InstrumentKind) PrecomputedSum() bool { - return k.Adding() && k.Asynchronous() -} - -// Observation is used for reporting an asynchronous batch of metric -// values. Instances of this type should be created by asynchronous -// instruments (e.g., Int64ValueObserver.Observation()). -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument AsyncImpl -} - -// Int64ObserverFunc is a type of callback that integral -// observers run. -type Int64ObserverFunc func(context.Context, Int64ObserverResult) - -// Float64ObserverFunc is a type of callback that floating point -// observers run. -type Float64ObserverFunc func(context.Context, Float64ObserverResult) - -// BatchObserverFunc is a callback argument for use with any -// Observer instrument that will be reported as a batch of -// observations. -type BatchObserverFunc func(context.Context, BatchObserverResult) - -// Int64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous integer metric instrument. -type Int64ObserverResult struct { - instrument AsyncImpl - function func([]label.KeyValue, ...Observation) -} - -// Float64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous floating point metric instrument. -type Float64ObserverResult struct { - instrument AsyncImpl - function func([]label.KeyValue, ...Observation) -} - -// BatchObserverResult is passed to a batch observer callback to -// capture observations for multiple asynchronous instruments. -type BatchObserverResult struct { - function func([]label.KeyValue, ...Observation) -} - -// Observe captures a single integer value from the associated -// instrument callback, with the given labels. 
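// Editorial sketch, not part of the patch: the InstrumentKind predicates above
// classify instruments along two axes, synchronous vs. asynchronous and adding
// vs. grouping, and derive Monotonic and PrecomputedSum from them.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	fmt.Println(metric.CounterInstrumentKind.Synchronous())        // true
	fmt.Println(metric.CounterInstrumentKind.Monotonic())          // true: a Counter only goes up
	fmt.Println(metric.SumObserverInstrumentKind.Asynchronous())   // true
	fmt.Println(metric.SumObserverInstrumentKind.PrecomputedSum()) // true: adding and asynchronous
	fmt.Println(metric.ValueRecorderInstrumentKind.Grouping())     // true: ValueRecorder is not an adding kind
}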
-func (ir Int64ObserverResult) Observe(value int64, labels ...label.KeyValue) { - ir.function(labels, Observation{ - instrument: ir.instrument, - number: number.NewInt64Number(value), - }) -} - -// Observe captures a single floating point value from the associated -// instrument callback, with the given labels. -func (fr Float64ObserverResult) Observe(value float64, labels ...label.KeyValue) { - fr.function(labels, Observation{ - instrument: fr.instrument, - number: number.NewFloat64Number(value), - }) -} - -// Observe captures a multiple observations from the associated batch -// instrument callback, with the given labels. -func (br BatchObserverResult) Observe(labels []label.KeyValue, obs ...Observation) { - br.function(labels, obs...) -} - -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner() is a non-exported method with no functional use - // other than to make this a non-empty interface. - AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. -type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]label.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]label.KeyValue, ...Observation)) - - AsyncRunner -} - -var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil) -var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil) -var _ AsyncBatchRunner = (*BatchObserverFunc)(nil) - -// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. -func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner { - return &c -} - -// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. -func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner { - return &c -} - -// newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. -func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner { - return &c -} - -// AnyRunner implements AsyncRunner. -func (*Int64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*Float64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*BatchObserverFunc) AnyRunner() {} - -// Run implements AsyncSingleRunner. -func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) { - (*i)(ctx, Int64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncSingleRunner. -func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) { - (*f)(ctx, Float64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncBatchRunner. 
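// Editorial sketch, not part of the patch: a single-instrument asynchronous
// callback built from the observer types above. The callback runs on every
// collection and reports through Int64ObserverResult.Observe. metric.Must and
// label.String are assumed to exist in this version of the API; everything
// else is defined in this file.
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/label"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := metric.NoopMeterProvider{}.Meter("kata-example")

	_ = metric.Must(meter).NewInt64ValueObserver(
		"runtime.goroutines",
		func(_ context.Context, result metric.Int64ObserverResult) {
			// Invoked by the SDK at each collection interval.
			result.Observe(int64(runtime.NumGoroutine()), label.String("component", "example"))
		},
	)
}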
-func (b *BatchObserverFunc) Run(ctx context.Context, function func([]label.KeyValue, ...Observation)) { - (*b)(ctx, BatchObserverResult{ - function: function, - }) -} - -// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver. -func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64ValueObserver{asyncInstrument: common}, err -} - -// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver. -func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64ValueObserver{asyncInstrument: common}, err -} - -// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver. -func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64SumObserver{asyncInstrument: common}, err -} - -// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver. -func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64SumObserver{asyncInstrument: common}, err -} - -// wrapInt64UpDownSumObserverInstrument converts an AsyncImpl into Int64UpDownSumObserver. -func wrapInt64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownSumObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64UpDownSumObserver{asyncInstrument: common}, err -} - -// wrapFloat64UpDownSumObserverInstrument converts an AsyncImpl into Float64UpDownSumObserver. -func wrapFloat64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownSumObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64UpDownSumObserver{asyncInstrument: common}, err -} - -// BatchObserver represents an Observer callback that can report -// observations for multiple instruments. -type BatchObserver struct { - meter Meter - runner AsyncBatchRunner -} - -// Int64ValueObserver is a metric that captures a set of int64 values at a -// point in time. -type Int64ValueObserver struct { - asyncInstrument -} - -// Float64ValueObserver is a metric that captures a set of float64 values -// at a point in time. -type Float64ValueObserver struct { - asyncInstrument -} - -// Int64SumObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64SumObserver struct { - asyncInstrument -} - -// Float64SumObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64SumObserver struct { - asyncInstrument -} - -// Int64UpDownSumObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64UpDownSumObserver struct { - asyncInstrument -} - -// Float64UpDownSumObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64UpDownSumObserver struct { - asyncInstrument -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. 
-func (i Int64ValueObserver) Observation(v int64) Observation { - return Observation{ - number: number.NewInt64Number(v), - instrument: i.instrument, - } -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64ValueObserver) Observation(v float64) Observation { - return Observation{ - number: number.NewFloat64Number(v), - instrument: f.instrument, - } -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64SumObserver) Observation(v int64) Observation { - return Observation{ - number: number.NewInt64Number(v), - instrument: i.instrument, - } -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64SumObserver) Observation(v float64) Observation { - return Observation{ - number: number.NewFloat64Number(v), - instrument: f.instrument, - } -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64UpDownSumObserver) Observation(v int64) Observation { - return Observation{ - number: number.NewInt64Number(v), - instrument: i.instrument, - } -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64UpDownSumObserver) Observation(v float64) Observation { - return Observation{ - number: number.NewFloat64Number(v), - instrument: f.instrument, - } -} - -// Measurement is used for reporting a synchronous batch of metric -// values. Instances of this type should be created by synchronous -// instruments (e.g., Int64Counter.Measurement()). -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl -} - -// syncInstrument contains a SyncImpl. -type syncInstrument struct { - instrument SyncImpl -} - -// syncBoundInstrument contains a BoundSyncImpl. -type syncBoundInstrument struct { - boundInstrument BoundSyncImpl -} - -// asyncInstrument contains a AsyncImpl. -type asyncInstrument struct { - instrument AsyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// AsyncImpl returns the instrument that created this observation. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number -} - -// AsyncImpl implements AsyncImpl. 
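// Editorial sketch, not part of the patch: one batch callback reporting
// several asynchronous instruments at once, combining
// BatchObserverResult.Observe with the Observation constructors above. The
// instrument names, label values, and reported numbers are invented for
// illustration; metric.Must and label.String are assumed.
package main

import (
	"context"

	"go.opentelemetry.io/otel/label"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := metric.NoopMeterProvider{}.Meter("kata-example")

	var userTime, sysTime metric.Int64SumObserver
	batch := metric.Must(meter).NewBatchObserver(func(_ context.Context, result metric.BatchObserverResult) {
		// One label set, several observations, recorded as a single batch.
		result.Observe(
			[]label.KeyValue{label.String("host", "node-0")},
			userTime.Observation(1200),
			sysTime.Observation(300),
		)
	})
	userTime = batch.NewInt64SumObserver("cpu.user")
	sysTime = batch.NewInt64SumObserver("cpu.sys")
}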
-func (a asyncInstrument) AsyncImpl() AsyncImpl { - return a.instrument -} - -// SyncImpl returns the implementation object for synchronous instruments. -func (s syncInstrument) SyncImpl() SyncImpl { - return s.instrument -} - -func (s syncInstrument) bind(labels []label.KeyValue) syncBoundInstrument { - return newSyncBoundInstrument(s.instrument.Bind(labels)) -} - -func (s syncInstrument) float64Measurement(value float64) Measurement { - return newMeasurement(s.instrument, number.NewFloat64Number(value)) -} - -func (s syncInstrument) int64Measurement(value int64) Measurement { - return newMeasurement(s.instrument, number.NewInt64Number(value)) -} - -func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []label.KeyValue) { - s.instrument.RecordOne(ctx, number, labels) -} - -func (h syncBoundInstrument) directRecord(ctx context.Context, number number.Number) { - h.boundInstrument.RecordOne(ctx, number) -} - -// Unbind calls SyncImpl.Unbind. -func (h syncBoundInstrument) Unbind() { - h.boundInstrument.Unbind() -} - -// checkNewAsync receives an AsyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - instrument = NoopAsync{} - } - return asyncInstrument{ - instrument: instrument, - }, err -} - -// checkNewSync receives an SyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - // Note: an alternate behavior would be to synthesize a new name - // or group all duplicately-named instruments of a certain type - // together and use a tag for the original name, e.g., - // name = 'invalid.counter.int64' - // label = 'original-name=duplicate-counter-name' - instrument = NoopSync{} - } - return syncInstrument{ - instrument: instrument, - }, err -} - -func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument { - return syncBoundInstrument{ - boundInstrument: boundInstrument, - } -} - -func newMeasurement(instrument SyncImpl, number number.Number) Measurement { - return Measurement{ - instrument: instrument, - number: number, - } -} - -// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter. -func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Int64Counter{syncInstrument: common}, err -} - -// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter. -func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Float64Counter{syncInstrument: common}, err -} - -// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter. -func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Int64UpDownCounter{syncInstrument: common}, err -} - -// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter. 
-func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Float64UpDownCounter{syncInstrument: common}, err -} - -// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder. -func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) { - common, err := checkNewSync(syncInst, err) - return Int64ValueRecorder{syncInstrument: common}, err -} - -// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder. -func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) { - common, err := checkNewSync(syncInst, err) - return Float64ValueRecorder{syncInstrument: common}, err -} - -// Float64Counter is a metric that accumulates float64 values. -type Float64Counter struct { - syncInstrument -} - -// Int64Counter is a metric that accumulates int64 values. -type Int64Counter struct { - syncInstrument -} - -// BoundFloat64Counter is a bound instrument for Float64Counter. -// -// It inherits the Unbind function from syncBoundInstrument. -type BoundFloat64Counter struct { - syncBoundInstrument -} - -// BoundInt64Counter is a boundInstrument for Int64Counter. -// -// It inherits the Unbind function from syncBoundInstrument. -type BoundInt64Counter struct { - syncBoundInstrument -} - -// Bind creates a bound instrument for this counter. The labels are -// associated with values recorded via subsequent calls to Record. -func (c Float64Counter) Bind(labels ...label.KeyValue) (h BoundFloat64Counter) { - h.syncBoundInstrument = c.bind(labels) - return -} - -// Bind creates a bound instrument for this counter. The labels are -// associated with values recorded via subsequent calls to Record. -func (c Int64Counter) Bind(labels ...label.KeyValue) (h BoundInt64Counter) { - h.syncBoundInstrument = c.bind(labels) - return -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Counter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Counter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64Counter) Add(ctx context.Context, value float64, labels ...label.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64Counter) Add(ctx context.Context, value int64, labels ...label.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Add adds the value to the counter's sum using the labels -// previously bound to this counter via Bind() -func (b BoundFloat64Counter) Add(ctx context.Context, value float64) { - b.directRecord(ctx, number.NewFloat64Number(value)) -} - -// Add adds the value to the counter's sum using the labels -// previously bound to this counter via Bind() -func (b BoundInt64Counter) Add(ctx context.Context, value int64) { - b.directRecord(ctx, number.NewInt64Number(value)) -} - -// Float64UpDownCounter is a metric instrument that sums floating -// point values. 
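// Editorial sketch, not part of the patch: binding a label set once and
// reusing it on a hot path, using the Bind/Add methods above and the Unbind
// inherited from syncBoundInstrument. metric.Must and label.String are
// assumed.
package main

import (
	"context"

	"go.opentelemetry.io/otel/label"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := metric.NoopMeterProvider{}.Meter("kata-example")
	requests := metric.Must(meter).NewInt64Counter("http.requests.total")

	// Bind resolves the label set once; Add then skips per-call label handling.
	bound := requests.Bind(label.String("method", "GET"))
	defer bound.Unbind()

	for i := 0; i < 3; i++ {
		bound.Add(ctx, 1)
	}
}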
-type Float64UpDownCounter struct { - syncInstrument -} - -// Int64UpDownCounter is a metric instrument that sums integer values. -type Int64UpDownCounter struct { - syncInstrument -} - -// BoundFloat64UpDownCounter is a bound instrument for Float64UpDownCounter. -// -// It inherits the Unbind function from syncBoundInstrument. -type BoundFloat64UpDownCounter struct { - syncBoundInstrument -} - -// BoundInt64UpDownCounter is a boundInstrument for Int64UpDownCounter. -// -// It inherits the Unbind function from syncBoundInstrument. -type BoundInt64UpDownCounter struct { - syncBoundInstrument -} - -// Bind creates a bound instrument for this counter. The labels are -// associated with values recorded via subsequent calls to Record. -func (c Float64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundFloat64UpDownCounter) { - h.syncBoundInstrument = c.bind(labels) - return -} - -// Bind creates a bound instrument for this counter. The labels are -// associated with values recorded via subsequent calls to Record. -func (c Int64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundInt64UpDownCounter) { - h.syncBoundInstrument = c.bind(labels) - return -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64UpDownCounter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64UpDownCounter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...label.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...label.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Add adds the value to the counter's sum using the labels -// previously bound to this counter via Bind() -func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) { - b.directRecord(ctx, number.NewFloat64Number(value)) -} - -// Add adds the value to the counter's sum using the labels -// previously bound to this counter via Bind() -func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) { - b.directRecord(ctx, number.NewInt64Number(value)) -} - -// Float64ValueRecorder is a metric that records float64 values. -type Float64ValueRecorder struct { - syncInstrument -} - -// Int64ValueRecorder is a metric that records int64 values. -type Int64ValueRecorder struct { - syncInstrument -} - -// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder. -// -// It inherits the Unbind function from syncBoundInstrument. -type BoundFloat64ValueRecorder struct { - syncBoundInstrument -} - -// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder. -// -// It inherits the Unbind function from syncBoundInstrument. -type BoundInt64ValueRecorder struct { - syncBoundInstrument -} - -// Bind creates a bound instrument for this ValueRecorder. The labels are -// associated with values recorded via subsequent calls to Record. 
-func (c Float64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundFloat64ValueRecorder) { - h.syncBoundInstrument = c.bind(labels) - return -} - -// Bind creates a bound instrument for this ValueRecorder. The labels are -// associated with values recorded via subsequent calls to Record. -func (c Int64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundInt64ValueRecorder) { - h.syncBoundInstrument = c.bind(labels) - return -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64ValueRecorder) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64ValueRecorder) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Record adds a new value to the list of ValueRecorder's records. The -// labels should contain the keys and values to be associated with -// this value. -func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...label.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Record adds a new value to the ValueRecorder's distribution. The -// labels should contain the keys and values to be associated with -// this value. -func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...label.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Record adds a new value to the ValueRecorder's distribution using the labels -// previously bound to the ValueRecorder via Bind(). -func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) { - b.directRecord(ctx, number.NewFloat64Number(value)) -} - -// Record adds a new value to the ValueRecorder's distribution using the labels -// previously bound to the ValueRecorder via Bind(). -func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) { - b.directRecord(ctx, number.NewInt64Number(value)) -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_noop.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_noop.go deleted file mode 100644 index 5cf26fc24..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_noop.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/metric/number" -) - -type NoopMeterProvider struct{} - -type noopInstrument struct{} -type noopBoundInstrument struct{} -type NoopSync struct{ noopInstrument } -type NoopAsync struct{ noopInstrument } - -var _ MeterProvider = NoopMeterProvider{} -var _ SyncImpl = NoopSync{} -var _ BoundSyncImpl = noopBoundInstrument{} -var _ AsyncImpl = NoopAsync{} - -func (NoopMeterProvider) Meter(_ string, _ ...MeterOption) Meter { - return Meter{} -} - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (noopInstrument) Descriptor() Descriptor { - return Descriptor{} -} - -func (noopBoundInstrument) RecordOne(context.Context, number.Number) { -} - -func (noopBoundInstrument) Unbind() { -} - -func (NoopSync) Bind([]label.KeyValue) BoundSyncImpl { - return noopBoundInstrument{} -} - -func (NoopSync) RecordOne(context.Context, number.Number, []label.KeyValue) { -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_sdkapi.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_sdkapi.go deleted file mode 100644 index 77d6adb17..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/metric_sdkapi.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // RecordBatch atomically records a batch of measurements. - RecordBatch(ctx context.Context, labels []label.KeyValue, measurement ...Measurement) - - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument( - descriptor Descriptor, - runner AsyncRunner, - ) (AsyncImpl, error) -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. -type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. - Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., ValueRecorder and Counter instruments). 
-type SyncImpl interface { - InstrumentImpl - - // Bind creates an implementation-level bound instrument, - // binding a label set with this instrument implementation. - Bind(labels []label.KeyValue) BoundSyncImpl - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number, labels []label.KeyValue) -} - -// BoundSyncImpl is the implementation-level interface to a -// generic bound synchronous instrument -type BoundSyncImpl interface { - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number) - - // Unbind frees the resources associated with this bound instrument. It - // does not affect the metric this bound instrument was created through. - Unbind() -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl -} - -// WrapMeterImpl constructs a `Meter` implementation from a -// `MeterImpl` implementation. -func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter { - return Meter{ - impl: impl, - name: instrumentationName, - version: NewMeterConfig(opts...).InstrumentationVersion, - } -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/doc.go deleted file mode 100644 index 0649ff875..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package number provides a number abstraction for instruments that -either support int64 or float64 input values. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package number // import "go.opentelemetry.io/otel/metric/number" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go deleted file mode 100644 index 6288c7ea2..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package number - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Int64Kind-0] - _ = x[Float64Kind-1] -} - -const _Kind_name = "Int64KindFloat64Kind" - -var _Kind_index = [...]uint8{0, 9, 20} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/number.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/number.go deleted file mode 100644 index 3ec95e201..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/number/number.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package number // import "go.opentelemetry.io/otel/metric/number" - -//go:generate stringer -type=Kind - -import ( - "fmt" - "math" - "sync/atomic" - - "go.opentelemetry.io/otel/internal" -) - -// Kind describes the data type of the Number. -type Kind int8 - -const ( - // Int64Kind means that the Number stores int64. - Int64Kind Kind = iota - // Float64Kind means that the Number stores float64. - Float64Kind -) - -// Zero returns a zero value for a given Kind -func (k Kind) Zero() Number { - switch k { - case Int64Kind: - return NewInt64Number(0) - case Float64Kind: - return NewFloat64Number(0.) - default: - return Number(0) - } -} - -// Minimum returns the minimum representable value -// for a given Kind -func (k Kind) Minimum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MinInt64) - case Float64Kind: - return NewFloat64Number(-1. * math.MaxFloat64) - default: - return Number(0) - } -} - -// Maximum returns the maximum representable value -// for a given Kind -func (k Kind) Maximum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MaxInt64) - case Float64Kind: - return NewFloat64Number(math.MaxFloat64) - default: - return Number(0) - } -} - -// Number represents either an integral or a floating point value. It -// needs to be accompanied with a source of Kind that describes -// the actual type of the value stored within Number. -type Number uint64 - -// - constructors - -// NewNumberFromRaw creates a new Number from a raw value. -func NewNumberFromRaw(r uint64) Number { - return Number(r) -} - -// NewInt64Number creates an integral Number. -func NewInt64Number(i int64) Number { - return NewNumberFromRaw(internal.Int64ToRaw(i)) -} - -// NewFloat64Number creates a floating point Number. -func NewFloat64Number(f float64) Number { - return NewNumberFromRaw(internal.Float64ToRaw(f)) -} - -// NewNumberSignChange returns a number with the same magnitude and -// the opposite sign. `kind` must describe the kind of number in `nn`. -func NewNumberSignChange(kind Kind, nn Number) Number { - switch kind { - case Int64Kind: - return NewInt64Number(-nn.AsInt64()) - case Float64Kind: - return NewFloat64Number(-nn.AsFloat64()) - } - return nn -} - -// - as x - -// AsNumber gets the Number. 
-func (n *Number) AsNumber() Number { - return *n -} - -// AsRaw gets the uninterpreted raw value. Might be useful for some -// atomic operations. -func (n *Number) AsRaw() uint64 { - return uint64(*n) -} - -// AsInt64 assumes that the value contains an int64 and returns it as -// such. -func (n *Number) AsInt64() int64 { - return internal.RawToInt64(n.AsRaw()) -} - -// AsFloat64 assumes that the measurement value contains a float64 and -// returns it as such. -func (n *Number) AsFloat64() float64 { - return internal.RawToFloat64(n.AsRaw()) -} - -// - as x atomic - -// AsNumberAtomic gets the Number atomically. -func (n *Number) AsNumberAtomic() Number { - return NewNumberFromRaw(n.AsRawAtomic()) -} - -// AsRawAtomic gets the uninterpreted raw value atomically. Might be -// useful for some atomic operations. -func (n *Number) AsRawAtomic() uint64 { - return atomic.LoadUint64(n.AsRawPtr()) -} - -// AsInt64Atomic assumes that the number contains an int64 and returns -// it as such atomically. -func (n *Number) AsInt64Atomic() int64 { - return atomic.LoadInt64(n.AsInt64Ptr()) -} - -// AsFloat64Atomic assumes that the measurement value contains a -// float64 and returns it as such atomically. -func (n *Number) AsFloat64Atomic() float64 { - return internal.RawToFloat64(n.AsRawAtomic()) -} - -// - as x ptr - -// AsRawPtr gets the pointer to the raw, uninterpreted raw -// value. Might be useful for some atomic operations. -func (n *Number) AsRawPtr() *uint64 { - return (*uint64)(n) -} - -// AsInt64Ptr assumes that the number contains an int64 and returns a -// pointer to it. -func (n *Number) AsInt64Ptr() *int64 { - return internal.RawPtrToInt64Ptr(n.AsRawPtr()) -} - -// AsFloat64Ptr assumes that the number contains a float64 and returns a -// pointer to it. -func (n *Number) AsFloat64Ptr() *float64 { - return internal.RawPtrToFloat64Ptr(n.AsRawPtr()) -} - -// - coerce - -// CoerceToInt64 casts the number to int64. May result in -// data/precision loss. -func (n *Number) CoerceToInt64(kind Kind) int64 { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return int64(n.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CoerceToFloat64 casts the number to float64. May result in -// data/precision loss. -func (n *Number) CoerceToFloat64(kind Kind) float64 { - switch kind { - case Int64Kind: - return float64(n.AsInt64()) - case Float64Kind: - return n.AsFloat64() - default: - // you get what you deserve - return 0 - } -} - -// - set - -// SetNumber sets the number to the passed number. Both should be of -// the same kind. -func (n *Number) SetNumber(nn Number) { - *n.AsRawPtr() = nn.AsRaw() -} - -// SetRaw sets the number to the passed raw value. Both number and the -// raw number should represent the same kind. -func (n *Number) SetRaw(r uint64) { - *n.AsRawPtr() = r -} - -// SetInt64 assumes that the number contains an int64 and sets it to -// the passed value. -func (n *Number) SetInt64(i int64) { - *n.AsInt64Ptr() = i -} - -// SetFloat64 assumes that the number contains a float64 and sets it -// to the passed value. -func (n *Number) SetFloat64(f float64) { - *n.AsFloat64Ptr() = f -} - -// - set atomic - -// SetNumberAtomic sets the number to the passed number -// atomically. Both should be of the same kind. -func (n *Number) SetNumberAtomic(nn Number) { - atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw()) -} - -// SetRawAtomic sets the number to the passed raw value -// atomically. Both number and the raw number should represent the -// same kind. 
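// Editorial sketch, not part of the patch: Number is a bare uint64 whose bits
// are reinterpreted according to the Kind, which is what lets the same atomic
// uint64 operations back both int64 and float64 instruments. The comparison
// with math.Float64bits assumes internal.Float64ToRaw performs the usual
// IEEE-754 bit conversion.
package main

import (
	"fmt"
	"math"

	"go.opentelemetry.io/otel/metric/number"
)

func main() {
	n := number.NewFloat64Number(3.5)

	// The raw value should be the IEEE-754 bit pattern of 3.5.
	fmt.Println(n.AsRaw() == math.Float64bits(3.5))
	fmt.Println(n.AsFloat64()) // 3.5

	// Reading the same bits with the wrong kind yields a meaningless integer,
	// so the Kind always has to travel alongside the Number.
	fmt.Println(n.AsInt64())
}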
-func (n *Number) SetRawAtomic(r uint64) { - atomic.StoreUint64(n.AsRawPtr(), r) -} - -// SetInt64Atomic assumes that the number contains an int64 and sets -// it to the passed value atomically. -func (n *Number) SetInt64Atomic(i int64) { - atomic.StoreInt64(n.AsInt64Ptr(), i) -} - -// SetFloat64Atomic assumes that the number contains a float64 and -// sets it to the passed value atomically. -func (n *Number) SetFloat64Atomic(f float64) { - atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f)) -} - -// - swap - -// SwapNumber sets the number to the passed number and returns the old -// number. Both this number and the passed number should be of the -// same kind. -func (n *Number) SwapNumber(nn Number) Number { - old := *n - n.SetNumber(nn) - return old -} - -// SwapRaw sets the number to the passed raw value and returns the old -// raw value. Both number and the raw number should represent the same -// kind. -func (n *Number) SwapRaw(r uint64) uint64 { - old := n.AsRaw() - n.SetRaw(r) - return old -} - -// SwapInt64 assumes that the number contains an int64, sets it to the -// passed value and returns the old int64 value. -func (n *Number) SwapInt64(i int64) int64 { - old := n.AsInt64() - n.SetInt64(i) - return old -} - -// SwapFloat64 assumes that the number contains an float64, sets it to -// the passed value and returns the old float64 value. -func (n *Number) SwapFloat64(f float64) float64 { - old := n.AsFloat64() - n.SetFloat64(f) - return old -} - -// - swap atomic - -// SwapNumberAtomic sets the number to the passed number and returns -// the old number atomically. Both this number and the passed number -// should be of the same kind. -func (n *Number) SwapNumberAtomic(nn Number) Number { - return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw())) -} - -// SwapRawAtomic sets the number to the passed raw value and returns -// the old raw value atomically. Both number and the raw number should -// represent the same kind. -func (n *Number) SwapRawAtomic(r uint64) uint64 { - return atomic.SwapUint64(n.AsRawPtr(), r) -} - -// SwapInt64Atomic assumes that the number contains an int64, sets it -// to the passed value and returns the old int64 value atomically. -func (n *Number) SwapInt64Atomic(i int64) int64 { - return atomic.SwapInt64(n.AsInt64Ptr(), i) -} - -// SwapFloat64Atomic assumes that the number contains an float64, sets -// it to the passed value and returns the old float64 value -// atomically. -func (n *Number) SwapFloat64Atomic(f float64) float64 { - return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f))) -} - -// - add - -// AddNumber assumes that this and the passed number are of the passed -// kind and adds the passed number to this number. -func (n *Number) AddNumber(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64(nn.AsInt64()) - case Float64Kind: - n.AddFloat64(nn.AsFloat64()) - } -} - -// AddRaw assumes that this number and the passed raw value are of the -// passed kind and adds the passed raw value to this number. -func (n *Number) AddRaw(kind Kind, r uint64) { - n.AddNumber(kind, NewNumberFromRaw(r)) -} - -// AddInt64 assumes that the number contains an int64 and adds the -// passed int64 to it. -func (n *Number) AddInt64(i int64) { - *n.AsInt64Ptr() += i -} - -// AddFloat64 assumes that the number contains a float64 and adds the -// passed float64 to it. 
-func (n *Number) AddFloat64(f float64) { - *n.AsFloat64Ptr() += f -} - -// - add atomic - -// AddNumberAtomic assumes that this and the passed number are of the -// passed kind and adds the passed number to this number atomically. -func (n *Number) AddNumberAtomic(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64Atomic(nn.AsInt64()) - case Float64Kind: - n.AddFloat64Atomic(nn.AsFloat64()) - } -} - -// AddRawAtomic assumes that this number and the passed raw value are -// of the passed kind and adds the passed raw value to this number -// atomically. -func (n *Number) AddRawAtomic(kind Kind, r uint64) { - n.AddNumberAtomic(kind, NewNumberFromRaw(r)) -} - -// AddInt64Atomic assumes that the number contains an int64 and adds -// the passed int64 to it atomically. -func (n *Number) AddInt64Atomic(i int64) { - atomic.AddInt64(n.AsInt64Ptr(), i) -} - -// AddFloat64Atomic assumes that the number contains a float64 and -// adds the passed float64 to it atomically. -func (n *Number) AddFloat64Atomic(f float64) { - for { - o := n.AsFloat64Atomic() - if n.CompareAndSwapFloat64(o, o+f) { - break - } - } -} - -// - compare and swap (atomic only) - -// CompareAndSwapNumber does the atomic CAS operation on this -// number. This number and passed old and new numbers should be of the -// same kind. -func (n *Number) CompareAndSwapNumber(on, nn Number) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) -} - -// CompareAndSwapRaw does the atomic CAS operation on this -// number. This number and passed old and new raw values should be of -// the same kind. -func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) -} - -// CompareAndSwapInt64 assumes that this number contains an int64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { - return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) -} - -// CompareAndSwapFloat64 assumes that this number contains a float64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) -} - -// - compare - -// CompareNumber compares two Numbers given their kind. Both numbers -// should have the same kind. This returns: -// 0 if the numbers are equal -// -1 if the subject `n` is less than the argument `nn` -// +1 if the subject `n` is greater than the argument `nn` -func (n *Number) CompareNumber(kind Kind, nn Number) int { - switch kind { - case Int64Kind: - return n.CompareInt64(nn.AsInt64()) - case Float64Kind: - return n.CompareFloat64(nn.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CompareRaw compares two numbers, where one is input as a raw -// uint64, interpreting both values as a `kind` of number. -func (n *Number) CompareRaw(kind Kind, r uint64) int { - return n.CompareNumber(kind, NewNumberFromRaw(r)) -} - -// CompareInt64 assumes that the Number contains an int64 and performs -// a comparison between the value and the other value. It returns the -// typical result of the compare function: -1 if the value is less -// than the other, 0 if both are equal, 1 if the value is greater than -// the other. 
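// Editorial sketch, not part of the patch: the atomic variants above let many
// goroutines update a shared Number without a mutex; integer adds map directly
// onto sync/atomic, while float adds go through the compare-and-swap loop in
// AddFloat64Atomic.
package main

import (
	"fmt"
	"sync"

	"go.opentelemetry.io/otel/metric/number"
)

func main() {
	var n number.Number // the zero value is a valid Int64Kind zero

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			n.AddInt64Atomic(1)
		}()
	}
	wg.Wait()

	fmt.Println(n.AsInt64()) // 100
}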
-func (n *Number) CompareInt64(i int64) int { - this := n.AsInt64() - if this < i { - return -1 - } else if this > i { - return 1 - } - return 0 -} - -// CompareFloat64 assumes that the Number contains a float64 and -// performs a comparison between the value and the other value. It -// returns the typical result of the compare function: -1 if the value -// is less than the other, 0 if both are equal, 1 if the value is -// greater than the other. -// -// Do not compare NaN values. -func (n *Number) CompareFloat64(f float64) int { - this := n.AsFloat64() - if this < f { - return -1 - } else if this > f { - return 1 - } - return 0 -} - -// - relations to zero - -// IsPositive returns true if the actual value is greater than zero. -func (n *Number) IsPositive(kind Kind) bool { - return n.compareWithZero(kind) > 0 -} - -// IsNegative returns true if the actual value is less than zero. -func (n *Number) IsNegative(kind Kind) bool { - return n.compareWithZero(kind) < 0 -} - -// IsZero returns true if the actual value is equal to zero. -func (n *Number) IsZero(kind Kind) bool { - return n.compareWithZero(kind) == 0 -} - -// - misc - -// Emit returns a string representation of the raw value of the -// Number. A %d is used for integral values, %f for floating point -// values. -func (n *Number) Emit(kind Kind) string { - switch kind { - case Int64Kind: - return fmt.Sprintf("%d", n.AsInt64()) - case Float64Kind: - return fmt.Sprintf("%f", n.AsFloat64()) - default: - return "" - } -} - -// AsInterface returns the number as an interface{}, typically used -// for Kind-correct JSON conversion. -func (n *Number) AsInterface(kind Kind) interface{} { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return n.AsFloat64() - default: - return math.NaN() - } -} - -// - private stuff - -func (n *Number) compareWithZero(kind Kind) int { - switch kind { - case Int64Kind: - return n.CompareInt64(0) - case Float64Kind: - return n.CompareFloat64(0.) - default: - // you get what you deserve - return 0 - } -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/registry/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/registry/doc.go deleted file mode 100644 index a53ba4554..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/registry/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package registry provides a non-standalone implementation of -MeterProvider that adds uniqueness checking for instrument descriptors -on top of other MeterProvider it wraps. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. 
-*/ -package registry // import "go.opentelemetry.io/otel/metric/registry" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/metric/registry/registry.go b/src/runtime/vendor/go.opentelemetry.io/otel/metric/registry/registry.go deleted file mode 100644 index f1d9819c3..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/metric/registry/registry.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry // import "go.opentelemetry.io/otel/metric/registry" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/metric" -) - -// MeterProvider is a standard MeterProvider for wrapping `MeterImpl` -type MeterProvider struct { - impl metric.MeterImpl -} - -var _ metric.MeterProvider = (*MeterProvider)(nil) - -// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding -// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter -// to wrap an implementation with uniqueness checking. -type uniqueInstrumentMeterImpl struct { - lock sync.Mutex - impl metric.MeterImpl - state map[key]metric.InstrumentImpl -} - -var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil) - -type key struct { - instrumentName string - instrumentationName string - InstrumentationVersion string -} - -// NewMeterProvider returns a new provider that implements instrument -// name-uniqueness checking. -func NewMeterProvider(impl metric.MeterImpl) *MeterProvider { - return &MeterProvider{ - impl: NewUniqueInstrumentMeterImpl(impl), - } -} - -// Meter implements MeterProvider. -func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - return metric.WrapMeterImpl(p.impl, instrumentationName, opts...) -} - -// ErrMetricKindMismatch is the standard error for mismatched metric -// instrument definitions. -var ErrMetricKindMismatch = fmt.Errorf( - "A metric was already registered by this name with another kind or number type") - -// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with -// the addition of uniqueness checking. -func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl { - return &uniqueInstrumentMeterImpl{ - impl: impl, - state: map[key]metric.InstrumentImpl{}, - } -} - -// RecordBatch implements metric.MeterImpl. -func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, ms ...metric.Measurement) { - u.impl.RecordBatch(ctx, labels, ms...) -} - -func keyOf(descriptor metric.Descriptor) key { - return key{ - descriptor.Name(), - descriptor.InstrumentationName(), - descriptor.InstrumentationVersion(), - } -} - -// NewMetricKindMismatchError formats an error that describes a -// mismatched metric instrument definition. 
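// Editorial sketch, not part of the patch: what the uniqueness wrapper above
// enforces. Re-registering a compatible descriptor returns the existing
// instrument, while a conflicting kind or number type surfaces
// ErrMetricKindMismatch via NewMetricKindMismatchError (immediately below).
// checkRegistration is a hypothetical helper; sdk would come from a real
// metric SDK.
package example

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/registry"
)

func checkRegistration(sdk metric.MeterImpl) error {
	unique := registry.NewUniqueInstrumentMeterImpl(sdk)

	desc := metric.NewDescriptor("requests", metric.CounterInstrumentKind, number.Int64Kind)
	if _, err := unique.NewSyncInstrument(desc); err != nil {
		return err
	}

	// Same name, different instrument kind: rejected with a wrapped
	// ErrMetricKindMismatch.
	conflict := metric.NewDescriptor("requests", metric.ValueRecorderInstrumentKind, number.Int64Kind)
	_, err := unique.NewSyncInstrument(conflict)
	return err
}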
-func NewMetricKindMismatchError(desc metric.Descriptor) error { - return fmt.Errorf("Metric was %s (%s %s)registered as a %s %s: %w", - desc.Name(), - desc.InstrumentationName(), - desc.InstrumentationVersion(), - desc.NumberKind(), - desc.InstrumentKind(), - ErrMetricKindMismatch) -} - -// Compatible determines whether two metric.Descriptors are considered -// the same for the purpose of uniqueness checking. -func Compatible(candidate, existing metric.Descriptor) bool { - return candidate.InstrumentKind() == existing.InstrumentKind() && - candidate.NumberKind() == existing.NumberKind() -} - -// checkUniqueness returns an ErrMetricKindMismatch error if there is -// a conflict between a descriptor that was already registered and the -// `descriptor` argument. If there is an existing compatible -// registration, this returns the already-registered instrument. If -// there is no conflict and no prior registration, returns (nil, nil). -func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) { - impl, ok := u.state[keyOf(descriptor)] - if !ok { - return nil, nil - } - - if !Compatible(descriptor, impl.Descriptor()) { - return nil, NewMetricKindMismatchError(impl.Descriptor()) - } - - return impl, nil -} - -// NewSyncInstrument implements metric.MeterImpl. -func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(metric.SyncImpl), nil - } - - syncInst, err := u.impl.NewSyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[keyOf(descriptor)] = syncInst - return syncInst, nil -} - -// NewAsyncInstrument implements metric.MeterImpl. -func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument( - descriptor metric.Descriptor, - runner metric.AsyncRunner, -) (metric.AsyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(metric.AsyncImpl), nil - } - - asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) - if err != nil { - return nil, err - } - u.state[keyOf(descriptor)] = asyncInst - return asyncInst, nil -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/pre_release.sh b/src/runtime/vendor/go.opentelemetry.io/otel/pre_release.sh index 34290a1c8..0de22169c 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/pre_release.sh +++ b/src/runtime/vendor/go.opentelemetry.io/otel/pre_release.sh @@ -74,7 +74,7 @@ sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_VERSION}\"/" ./version.go. rm -f ./version.go.bak # Update go.mod -git checkout -b pre_release_${TAG} master +git checkout -b pre_release_${TAG} main PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's/^\.\///' | sort) for dir in $PACKAGE_DIRS; do @@ -91,5 +91,5 @@ git add . 
make ci git commit -m "Prepare for releasing $TAG" -printf "Now run following to verify the changes.\ngit diff master\n" +printf "Now run following to verify the changes.\ngit diff main\n" printf "\nThen push the changes to upstream\n" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/propagation.go b/src/runtime/vendor/go.opentelemetry.io/otel/propagation.go index 241a8cd45..d29aaa32c 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/propagation.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/propagation.go @@ -25,7 +25,7 @@ func GetTextMapPropagator() propagation.TextMapPropagator { return global.TextMapPropagator() } -// SetTextMapPropagator sets propagator as the global TSetTextMapPropagator. +// SetTextMapPropagator sets propagator as the global TextMapPropagator. func SetTextMapPropagator(propagator propagation.TextMapPropagator) { global.SetTextMapPropagator(propagator) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 2c416d7fb..16afed531 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -16,11 +16,8 @@ package propagation // import "go.opentelemetry.io/otel/propagation" import ( "context" - "net/url" - "strings" - "go.opentelemetry.io/otel/internal/baggage" - "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/baggage" ) const baggageHeader = "baggage" @@ -35,74 +32,24 @@ var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { - baggageMap := baggage.MapFromContext(ctx) - firstIter := true - var headerValueBuilder strings.Builder - baggageMap.Foreach(func(kv label.KeyValue) bool { - if !firstIter { - headerValueBuilder.WriteRune(',') - } - firstIter = false - headerValueBuilder.WriteString(url.QueryEscape(strings.TrimSpace((string)(kv.Key)))) - headerValueBuilder.WriteRune('=') - headerValueBuilder.WriteString(url.QueryEscape(strings.TrimSpace(kv.Value.Emit()))) - return true - }) - if headerValueBuilder.Len() > 0 { - headerString := headerValueBuilder.String() - carrier.Set(baggageHeader, headerString) + bStr := baggage.FromContext(ctx).String() + if bStr != "" { + carrier.Set(baggageHeader, bStr) } } // Extract returns a copy of parent with the baggage from the carrier added. func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { - bVal := carrier.Get(baggageHeader) - if bVal == "" { + bStr := carrier.Get(baggageHeader) + if bStr == "" { return parent } - baggageValues := strings.Split(bVal, ",") - keyValues := make([]label.KeyValue, 0, len(baggageValues)) - for _, baggageValue := range baggageValues { - valueAndProps := strings.Split(baggageValue, ";") - if len(valueAndProps) < 1 { - continue - } - nameValue := strings.Split(valueAndProps[0], "=") - if len(nameValue) < 2 { - continue - } - name, err := url.QueryUnescape(nameValue[0]) - if err != nil { - continue - } - trimmedName := strings.TrimSpace(name) - value, err := url.QueryUnescape(nameValue[1]) - if err != nil { - continue - } - trimmedValue := strings.TrimSpace(value) - - // TODO (skaris): properties defiend https://w3c.github.io/correlation-context/, are currently - // just put as part of the value. 
- var trimmedValueWithProps strings.Builder - trimmedValueWithProps.WriteString(trimmedValue) - for _, prop := range valueAndProps[1:] { - trimmedValueWithProps.WriteRune(';') - trimmedValueWithProps.WriteString(prop) - } - - keyValues = append(keyValues, label.String(trimmedName, trimmedValueWithProps.String())) - } - - if len(keyValues) > 0 { - // Only update the context if valid values were found - return baggage.ContextWithMap(parent, baggage.NewMap(baggage.MapUpdate{ - MultiKV: keyValues, - })) + bag, err := baggage.Parse(bStr) + if err != nil { + return parent } - - return parent + return baggage.ContextWithBaggage(parent, bag) } // Fields returns the keys who's values are set with Inject. diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/doc.go index 89573f1ba..9bef5e53d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -15,10 +15,6 @@ /* Package propagation contains OpenTelemetry context propagators. -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - OpenTelemetry propagators are used to extract and inject context data from and into messages exchanged by applications. The propagator supported by this package is the W3C Trace Context encoding diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c867f86b8..2f54532d9 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -14,25 +14,74 @@ package propagation // import "go.opentelemetry.io/otel/propagation" -import "context" +import ( + "context" + "net/http" +) // TextMapCarrier is the storage medium used by a TextMapPropagator. type TextMapCarrier interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + // Get returns the value associated with the passed key. Get(key string) string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + // Set stores the key-value pair. Set(key string, value string) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Keys lists the keys stored in this carrier. + Keys() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +type HeaderCarrier http.Header + +// Get returns the value associated with the passed key. +func (hc HeaderCarrier) Get(key string) string { + return http.Header(hc).Get(key) +} + +// Set stores the key-value pair. +func (hc HeaderCarrier) Set(key string, value string) { + http.Header(hc).Set(key, value) +} + +// Keys lists the keys stored in this carrier. 
+func (hc HeaderCarrier) Keys() []string { + keys := make([]string, 0, len(hc)) + for k := range hc { + keys = append(keys, k) + } + return keys } // TextMapPropagator propagates cross-cutting concerns as key-value text // pairs within a carrier that travels in-band across process boundaries. type TextMapPropagator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + // Inject set cross-cutting concerns from the Context into the carrier. Inject(ctx context.Context, carrier TextMapCarrier) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + // Extract reads cross-cutting concerns from the carrier into a Context. Extract(ctx context.Context, carrier TextMapCarrier) context.Context - // Fields returns the keys who's values are set with Inject. + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Fields returns the keys whose values are set with Inject. Fields() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. } type compositeTextMapPropagator []TextMapPropagator diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index ec99e0965..902692da0 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -30,12 +30,6 @@ const ( tracestateHeader = "tracestate" ) -type traceContextPropagatorKeyType uint - -const ( - tracestateKey traceContextPropagatorKeyType = 0 -) - // TraceContext is a propagator that supports the W3C Trace Context format // (https://www.w3.org/TR/trace-context/) // @@ -51,30 +45,32 @@ var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[ // Inject set tracecontext from the Context into the carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { - tracestate := ctx.Value(tracestateKey) - if state, ok := tracestate.(string); tracestate != nil && ok { - carrier.Set(tracestateHeader, state) - } - sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return } - h := fmt.Sprintf("%.2x-%s-%s-%.2x", + + if ts := sc.TraceState().String(); ts != "" { + carrier.Set(tracestateHeader, ts) + } + + // Clear all flags other than the trace-context supported sampling bit. + flags := sc.TraceFlags() & trace.FlagsSampled + + h := fmt.Sprintf("%.2x-%s-%s-%s", supportedVersion, - sc.TraceID, - sc.SpanID, - sc.TraceFlags&trace.FlagsSampled) + sc.TraceID(), + sc.SpanID(), + flags) carrier.Set(traceparentHeader, h) } // Extract reads tracecontext from the carrier into a returned Context. +// +// The returned Context will be a copy of ctx and contain the extracted +// tracecontext as the remote SpanContext. If the extracted tracecontext is +// invalid, the passed ctx will be returned directly instead. 
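As an illustrative sketch only (not taken from the vendored sources), the reworked propagation surface above might be exercised from application code roughly as follows; the request URL is a placeholder:

// Sketch: wiring the otel 1.0 composite propagator and carrying context
// over HTTP headers via HeaderCarrier. The URL below is a placeholder.
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register W3C trace-context plus W3C baggage as the global propagator.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))

	ctx := context.Background()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.invalid/", nil)
	if err != nil {
		panic(err)
	}

	// HeaderCarrier adapts http.Header to TextMapCarrier, so the traceparent,
	// tracestate and baggage headers are written straight into the request.
	otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))

	// A receiver uses the same carrier type to pull the remote SpanContext
	// and baggage back out of the incoming headers.
	serverCtx := otel.GetTextMapPropagator().Extract(context.Background(), propagation.HeaderCarrier(req.Header))
	_ = serverCtx
}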
func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - state := carrier.Get(tracestateHeader) - if state != "" { - ctx = context.WithValue(ctx, tracestateKey, state) - } - sc := tc.extract(carrier) if !sc.IsValid() { return ctx @@ -118,9 +114,9 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { return trace.SpanContext{} } - var sc trace.SpanContext + var scc trace.SpanContextConfig - sc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) + scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) if err != nil { return trace.SpanContext{} } @@ -128,7 +124,7 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { if len(matches[3]) != 16 { return trace.SpanContext{} } - sc.SpanID, err = trace.SpanIDFromHex(matches[3]) + scc.SpanID, err = trace.SpanIDFromHex(matches[3]) if err != nil { return trace.SpanContext{} } @@ -141,8 +137,15 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { return trace.SpanContext{} } // Clear all flags other than the trace-context supported sampling bit. - sc.TraceFlags = opts[0] & trace.FlagsSampled + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + + // Ignore the error returned here. Failure to parse tracestate MUST NOT + // affect the parsing of traceparent according to the W3C tracecontext + // specification. + scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) + scc.Remote = true + sc := trace.NewSpanContext(scc) if !sc.IsValid() { return trace.SpanContext{} } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/export/trace/trace.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/export/trace/trace.go deleted file mode 100644 index a9883bff7..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/export/trace/trace.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/export/trace" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/trace" - - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" -) - -// SpanExporter handles the delivery of SpanData to external receivers. This is -// the final component in the trace export pipeline. -type SpanExporter interface { - // ExportSpans exports a batch of SpanData. - // - // This function is called synchronously, so there is no concurrency - // safety requirement. However, due to the synchronous calling pattern, - // it is critical that all timeouts and cancellations contained in the - // passed context must be honored. - // - // Any retry logic must be contained in this function. The SDK that - // calls this function will not implement any retry logic. All errors - // returned by this function are considered unrecoverable and will be - // reported to a configured error Handler. 
- ExportSpans(ctx context.Context, spanData []*SpanData) error - // Shutdown notifies the exporter of a pending halt to operations. The - // exporter is expected to preform any cleanup or synchronization it - // requires while honoring all timeouts and cancellations contained in - // the passed context. - Shutdown(ctx context.Context) error -} - -// SpanData contains all the information collected by a completed span. -type SpanData struct { - SpanContext trace.SpanContext - ParentSpanID trace.SpanID - SpanKind trace.SpanKind - Name string - StartTime time.Time - // The wall clock time of EndTime will be adjusted to always be offset - // from StartTime by the duration of the span. - EndTime time.Time - Attributes []label.KeyValue - MessageEvents []Event - Links []trace.Link - StatusCode codes.Code - StatusMessage string - HasRemoteParent bool - DroppedAttributeCount int - DroppedMessageEventCount int - DroppedLinkCount int - - // ChildSpanCount holds the number of child span created for this span. - ChildSpanCount int - - // Resource contains attributes representing an entity that produced this span. - Resource *resource.Resource - - // InstrumentationLibrary defines the instrumentation library used to - // provide instrumentation. - InstrumentationLibrary instrumentation.Library -} - -// Event is thing that happened during a Span's lifetime. -type Event struct { - // Name is the name of this event - Name string - - // Attributes describe the aspects of the event. - Attributes []label.KeyValue - - // Time is the time at which this event was recorded. - Time time.Time -} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index 1bf64a65d..6f0016169 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -16,12 +16,8 @@ Package instrumentation provides an instrumentation library structure to be passed to both the OpenTelemetry Tracer and Meter components. -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - For more information see -[this](https://github.com/open-telemetry/oteps/blob/master/text/0083-component.md). +[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md). */ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" @@ -32,4 +28,6 @@ type Library struct { Name string // Version is the version of the instrumentation library. Version string + // SchemaURL of the telemetry emitted by the library. + SchemaURL string } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index c754e221e..a5eaa7e5d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -29,12 +29,17 @@ var ( // Detector detects OpenTelemetry resource information type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + // Detect returns an initialized Resource based on gathered information. 
// If the source information to construct a Resource contains invalid // values, a Resource is returned with the valid parts of the source // information used for initialization along with an appropriately // wrapped ErrPartialResource error. Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. } // Detect calls all input detectors sequentially and merges each result with the previous one. @@ -53,7 +58,10 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { continue } } - autoDetectedRes = Merge(autoDetectedRes, res) + autoDetectedRes, err = Merge(autoDetectedRes, res) + if err != nil { + errInfo = append(errInfo, err.Error()) + } } var aggregatedError error diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index a9a0fc889..24d5d43bf 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -18,42 +18,48 @@ import ( "context" "fmt" "os" + "path/filepath" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/semconv" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" ) type ( - // TelemetrySDK is a Detector that provides information about + // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use // the WithTelemetrySDK(nil) or WithoutBuiltin() options to // explicitly disable them. - TelemetrySDK struct{} + telemetrySDK struct{} - // Host is a Detector that provides information about the host + // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the // WithHost(nil) or WithoutBuiltin() options to explicitly // disable them. - Host struct{} + host struct{} stringDetector struct { - K label.Key - F func() (string, error) + schemaURL string + K attribute.Key + F func() (string, error) } + + defaultServiceNameDetector struct{} ) var ( - _ Detector = TelemetrySDK{} - _ Detector = Host{} + _ Detector = telemetrySDK{} + _ Detector = host{} _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} ) // Detect returns a *Resource that describes the OpenTelemetry SDK used. -func (TelemetrySDK) Detect(context.Context) (*Resource, error) { +func (telemetrySDK) Detect(context.Context) (*Resource, error) { return NewWithAttributes( + semconv.SchemaURL, semconv.TelemetrySDKNameKey.String("opentelemetry"), semconv.TelemetrySDKLanguageKey.String("go"), semconv.TelemetrySDKVersionKey.String(otel.Version()), @@ -61,21 +67,42 @@ func (TelemetrySDK) Detect(context.Context) (*Resource, error) { } // Detect returns a *Resource that describes the host being run on. -func (Host) Detect(ctx context.Context) (*Resource, error) { - return StringDetector(semconv.HostNameKey, os.Hostname).Detect(ctx) +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) } // StringDetector returns a Detector that will produce a *Resource -// containing the string as a value corresponding to k. 
-func StringDetector(k label.Key, f func() (string, error)) Detector { - return stringDetector{K: k, F: f} +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} } -// Detect implements Detector. +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) } - return NewWithAttributes(sd.K.String(value)), nil + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index 5082816e8..5fa45859b 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -17,134 +17,155 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" - "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/attribute" ) // config contains configuration for Resource creation. type config struct { // detectors that will be evaluated. detectors []Detector - - // telemetrySDK is used to specify non-default - // `telemetry.sdk.*` attributes. - telemetrySDK Detector - - // HostResource is used to specify non-default `host.*` - // attributes. - host Detector - - // FromEnv is used to specify non-default OTEL_RESOURCE_ATTRIBUTES - // attributes. - fromEnv Detector + // SchemaURL to associate with the Resource. + schemaURL string } // Option is the interface that applies a configuration option. type Option interface { - // Apply sets the Option value of a config. - Apply(*config) + // apply sets the Option value of a config. + apply(*config) } // WithAttributes adds attributes to the configured Resource. -func WithAttributes(attributes ...label.KeyValue) Option { +func WithAttributes(attributes ...attribute.KeyValue) Option { return WithDetectors(detectAttributes{attributes}) } type detectAttributes struct { - attributes []label.KeyValue + attributes []attribute.KeyValue } func (d detectAttributes) Detect(context.Context) (*Resource, error) { - return NewWithAttributes(d.attributes...), nil + return NewSchemaless(d.attributes...), nil } // WithDetectors adds detectors to be evaluated for the configured resource. func WithDetectors(detectors ...Detector) Option { - return detectorsOption{detectors} + return detectorsOption{detectors: detectors} } type detectorsOption struct { detectors []Detector } -// Apply implements Option. -func (o detectorsOption) Apply(cfg *config) { +func (o detectorsOption) apply(cfg *config) { cfg.detectors = append(cfg.detectors, o.detectors...) 
} -// WithTelemetrySDK overrides the builtin `telemetry.sdk.*` -// attributes. Use nil to disable these attributes entirely. -func WithTelemetrySDK(d Detector) Option { - return telemetrySDKOption{d} +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) } -type telemetrySDKOption struct { - Detector +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) } -// Apply implements Option. -func (o telemetrySDKOption) Apply(cfg *config) { - cfg.telemetrySDK = o.Detector +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) } -// WithHost overrides the builtin `host.*` attributes. Use nil to -// disable these attributes entirely. -func WithHost(d Detector) Option { - return hostOption{d} +// WithSchemaURL sets the schema URL for the configured resource. +func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) } -type hostOption struct { - Detector +type schemaURLOption string + +func (o schemaURLOption) apply(cfg *config) { + cfg.schemaURL = string(o) } -// Apply implements Option. -func (o hostOption) Apply(cfg *config) { - cfg.host = o.Detector +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) } -// WithFromEnv overrides the builtin detector for -// OTEL_RESOURCE_ATTRIBUTES. Use nil to disable environment checking. -func WithFromEnv(d Detector) Option { - return fromEnvOption{d} +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) } -type fromEnvOption struct { - Detector +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) } -// Apply implements Option. -func (o fromEnvOption) Apply(cfg *config) { - cfg.fromEnv = o.Detector +// WithProcess adds all the Process attributes to the configured Resource. +// See individual WithProcess* functions to configure specific attributes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) } -// WithoutBuiltin disables all the builtin detectors, including the -// telemetry.sdk.*, host.*, and the environment detector. -func WithoutBuiltin() Option { - return noBuiltinOption{} +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. +func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) } -type noBuiltinOption struct{} +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. +func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} -// Apply implements Option. 
-func (o noBuiltinOption) Apply(cfg *config) { - cfg.host = nil - cfg.telemetrySDK = nil - cfg.fromEnv = nil +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) } -// New returns a Resource combined from the provided attributes, -// user-provided detectors and builtin detectors. -func New(ctx context.Context, opts ...Option) (*Resource, error) { - cfg := config{ - telemetrySDK: TelemetrySDK{}, - host: Host{}, - fromEnv: FromEnv{}, - } - for _, opt := range opts { - opt.Apply(&cfg) - } - detectors := append( - []Detector{cfg.telemetrySDK, cfg.host, cfg.fromEnv}, - cfg.detectors..., - ) - return Detect(ctx, detectors...) +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process the configured Resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go index fe34f896a..9aab3d839 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -14,10 +14,6 @@ // Package resource provides detecting and representing resources. // -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -// // The fundamental struct is a Resource which holds identifying information // about the entities for which telemetry is exported. // diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index a8b918959..d08096c9d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -20,40 +20,67 @@ import ( "os" "strings" - "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" ) -// envVar is the environment variable name OpenTelemetry Resource information can be assigned to. -const envVar = "OTEL_RESOURCE_ATTRIBUTES" +const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. 
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) var ( // errMissingValue is returned when a resource value is missing. errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) ) -// FromEnv is a Detector that implements the Detector and collects +// fromEnv is a Detector that implements the Detector and collects // resources from environment. This Detector is included as a -// builtin. If these resource attributes are not wanted, use the -// WithFromEnv(nil) or WithoutBuiltin() options to explicitly disable -// them. -type FromEnv struct{} +// builtin. +type fromEnv struct{} // compile time assertion that FromEnv implements Detector interface -var _ Detector = FromEnv{} +var _ Detector = fromEnv{} // Detect collects resources from environment -func (FromEnv) Detect(context.Context) (*Resource, error) { - labels := strings.TrimSpace(os.Getenv(envVar)) +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) - if labels == "" { + if attrs == "" && svcName == "" { return Empty(), nil } - return constructOTResources(labels) + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceNameKey.String(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err } func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } pairs := strings.Split(s, ",") - labels := []label.KeyValue{} + attrs := []attribute.KeyValue{} var invalid []string for _, p := range pairs { field := strings.SplitN(p, "=", 2) @@ -62,11 +89,11 @@ func constructOTResources(s string) (*Resource, error) { continue } k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) - labels = append(labels, label.String(k, v)) + attrs = append(attrs, attribute.String(k, v)) } var err error if len(invalid) > 0 { err = fmt.Errorf("%w: %v", errMissingValue, invalid) } - return NewWithAttributes(labels...), err + return NewSchemaless(attrs...), err } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 000000000..ff0072d85 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
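As an illustrative sketch only (not taken from the vendored sources), the new functional options and environment detector above might be combined as follows; the service name and version values are placeholders, and with OTEL_SERVICE_NAME set the fromEnv detector gives that value precedence over any service.name listed in OTEL_RESOURCE_ATTRIBUTES:

// Sketch: building a Resource with the otel 1.0 option set. Attribute
// values below are placeholders, not kata-runtime defaults.
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_RESOURCE_ATTRIBUTES and OTEL_SERVICE_NAME
		resource.WithTelemetrySDK(), // telemetry.sdk.* attributes
		resource.WithHost(),         // host.name
		resource.WithOS(),           // os.type and os.description
		resource.WithProcess(),      // process.* and process.runtime.* attributes
		resource.WithSchemaURL(semconv.SchemaURL),
		resource.WithAttributes(
			semconv.ServiceNameKey.String("example-service"),
			attribute.String("service.version", "0.0.1"),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range res.Attributes() {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}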
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type osTypeDetector struct{} +type osDescriptionDetector struct{} + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescriptionKey.String(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. +func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, + "linux": semconv.OSTypeLinux, + "netbsd": semconv.OSTypeNetBSD, + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, + } + + var osTypeAttribute attribute.KeyValue + + if attr, ok := osTypeAttributeMap[osType]; ok { + osTypeAttribute = attr + } else { + osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) + } + + return osTypeAttribute +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go new file mode 100644 index 000000000..24ec85793 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "encoding/xml" + "fmt" + "io" + "os" +) + +type plist struct { + XMLName xml.Name `xml:"plist"` + Dict dict `xml:"dict"` +} + +type dict struct { + Key []string `xml:"key"` + String []string `xml:"string"` +} + +// osRelease builds a string describing the operating system release based on the +// contents of the property list (.plist) system files. If no .plist files are found, +// or if the required properties to build the release description string are missing, +// an empty string is returned instead. The generated string resembles the output of +// the `sw_vers` commandline program, but in a single-line string. For more information +// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. +func osRelease() string { + file, err := getPlistFile() + if err != nil { + return "" + } + + defer file.Close() + + values, err := parsePlistFile(file) + if err != nil { + return "" + } + + return buildOSRelease(values) +} + +// getPlistFile returns a *os.File pointing to one of the well-known .plist files +// available on macOS. If no file can be opened, it returns an error. +func getPlistFile() (*os.File, error) { + return getFirstAvailableFile([]string{ + "/System/Library/CoreServices/SystemVersion.plist", + "/System/Library/CoreServices/ServerVersion.plist", + }) +} + +// parsePlistFile process the file pointed by `file` as a .plist file and returns +// a map with the key-values for each pair of correlated and elements +// contained in it. +func parsePlistFile(file io.Reader) (map[string]string, error) { + var v plist + + err := xml.NewDecoder(file).Decode(&v) + if err != nil { + return nil, err + } + + if len(v.Dict.Key) != len(v.Dict.String) { + return nil, fmt.Errorf("the number of and elements doesn't match") + } + + properties := make(map[string]string, len(v.Dict.Key)) + for i, key := range v.Dict.Key { + properties[key] = v.Dict.String[i] + } + + return properties, nil +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It tries to find the `ProductName`, `ProductVersion` +// and `ProductBuildVersion` properties. If some of these properties are not found, +// it returns an empty string. +func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 000000000..8e3a8066c --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. +func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + parts := strings.SplitN(line, "=", 2) + + if len(parts) != 2 || len(parts[0]) == 0 { + return "", "", false + } + + key := strings.TrimSpace(parts[0]) + value := unescape(unquote(strings.TrimSpace(parts[1]))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. +func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. 
If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 000000000..cb0252d7e --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bytes" + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. +func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + charsToString(utsName.Sysname[:]), + charsToString(utsName.Nodename[:]), + charsToString(utsName.Release[:]), + charsToString(utsName.Version[:]), + charsToString(utsName.Machine[:]), + ), nil +} + +// charsToString converts a C-like null-terminated char array to a Go string. 
+func charsToString(charArray []byte) string { + if i := bytes.IndexByte(charArray, 0); i >= 0 { + charArray = charArray[:i] + } + + return string(charArray) +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. +func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 000000000..3ebcb534f --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !aix +// +build !darwin +// +build !dragonfly +// +build !freebsd +// +build !linux +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows +// +build !zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 000000000..faad64d8d --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,101 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 000000000..60f33e5ca --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,175 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" +) + +type pidProvider func() int +type executablePathProvider func() (string, error) +type commandArgsProvider func() []string +type ownerProvider func() (*user.User, error) +type runtimeNameProvider func() string +type runtimeVersionProvider func() string +type runtimeOSProvider func() string +type runtimeArchProvider func() string + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type processPIDDetector struct{} +type processExecutableNameDetector struct{} +type processExecutablePathDetector struct{} +type processCommandArgsDetector struct{} +type processOwnerDetector struct{} +type processRuntimeNameDetector struct{} +type processRuntimeVersionDetector struct{} +type processRuntimeDescriptionDetector struct{} + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. 
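The default providers above feed the process detectors that follow. As a rough guide to what the detectors resolve to on an ordinary system (a standalone sketch, not part of this change):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

func main() {
	// process.pid and process.executable.name, as produced by the default
	// pid and command-args providers above.
	fmt.Println("process.pid:", os.Getpid())
	fmt.Println("process.executable.name:", filepath.Base(os.Args[0]))

	// The runtime-description detector later in this file composes its
	// attribute from the runtime providers in exactly this shape.
	fmt.Println("process.runtime.description:",
		fmt.Sprintf("go version %s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH))
}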
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. +func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription), + ), nil +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 0141f5b1d..6bb2a2805 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -15,7 +15,12 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( - "go.opentelemetry.io/otel/label" + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" ) // Resource describes an entity about which identifying information @@ -26,18 +31,83 @@ import ( // (`*resource.Resource`). The `nil` value is equivalent to an empty // Resource. 
type Resource struct { - labels label.Set + attrs attribute.Set + schemaURL string } -var emptyResource Resource +var ( + emptyResource Resource + + defaultResource = func(r *Resource, err error) *Resource { + if err != nil { + otel.Handle(err) + } + return r + }( + Detect( + context.Background(), + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + ), + ) +) + +var ( + errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") +) + +// New returns a Resource combined from the user-provided detectors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + opt.apply(&cfg) + } + + resource, err := Detect(ctx, cfg.detectors...) -// NewWithAttributes creates a resource from a set of attributes. If there are -// duplicate keys present in the list of attributes, then the last -// value found for the key is preserved. -func NewWithAttributes(kvs ...label.KeyValue) *Resource { - return &Resource{ - labels: label.NewSet(kvs...), + var err2 error + resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL}) + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) } + + return resource, err +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. +func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &emptyResource + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &emptyResource + } + + return &Resource{attrs: s} //nolint } // String implements the Stringer interface and provides a @@ -49,25 +119,32 @@ func (r *Resource) String() string { if r == nil { return "" } - return r.labels.Encoded(label.DefaultEncoder()) + return r.attrs.Encoded(attribute.DefaultEncoder()) } // Attributes returns a copy of attributes from the resource in a sorted order. // To avoid allocating a new slice, use an iterator. -func (r *Resource) Attributes() []label.KeyValue { +func (r *Resource) Attributes() []attribute.KeyValue { if r == nil { r = Empty() } - return r.labels.ToSlice() + return r.attrs.ToSlice() +} + +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL } -// Iter returns an interator of the Resource attributes. +// Iter returns an iterator of the Resource attributes. // This is ideal to use if you do not want a copy of the attributes. 
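A minimal sketch of the reworked constructors: NewWithAttributes now takes a schema URL as its first argument, and NewSchemaless covers the case where no schema applies. The service name below is a placeholder:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

func main() {
	// Attributes tied to the semconv v1.4.0 schema.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("example-service"), // placeholder name
		attribute.String("deployment.environment", "dev"),
	)
	fmt.Println(res.SchemaURL())
	fmt.Println(res) // encoded, sorted attribute list

	// NewSchemaless leaves the schema URL empty; invalid attributes are dropped.
	_ = resource.NewSchemaless(attribute.Int("worker.count", 4))
}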
-func (r *Resource) Iter() label.Iterator { +func (r *Resource) Iter() attribute.Iterator { if r == nil { r = Empty() } - return r.labels.Iter() + return r.attrs.Iter() } // Equal returns true when a Resource is equivalent to this Resource. @@ -84,56 +161,92 @@ func (r *Resource) Equal(eq *Resource) bool { // Merge creates a new resource by combining resource a and b. // // If there are common keys between resource a and b, then the value -// from resource a is preserved. -func Merge(a, b *Resource) *Resource { +// from resource b will overwrite the value from resource a, even +// if resource b's value is empty. +// +// The SchemaURL of the resources will be merged according to the spec rules: +// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge +// If the resources have different non-empty schemaURL an empty resource and an error +// will be returned. +func Merge(a, b *Resource) (*Resource, error) { if a == nil && b == nil { - return Empty() + return Empty(), nil } if a == nil { - return b + return b, nil } if b == nil { - return a + return a, nil } - // Note: 'a' labels will overwrite 'b' with last-value-wins in label.Key() - // Meaning this is equivalent to: append(b.Attributes(), a.Attributes()...) - mi := label.NewMergeIterator(a.LabelSet(), b.LabelSet()) - combine := make([]label.KeyValue, 0, a.Len()+b.Len()) + // Merge the schema URL. + var schemaURL string + if a.schemaURL == "" { + schemaURL = b.schemaURL + } else if b.schemaURL == "" { + schemaURL = a.schemaURL + } else if a.schemaURL == b.schemaURL { + schemaURL = a.schemaURL + } else { + return Empty(), errMergeConflictSchemaURL + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) for mi.Next() { combine = append(combine, mi.Label()) } - return NewWithAttributes(combine...) + merged := NewWithAttributes(schemaURL, combine...) + return merged, nil } -// Empty returns an instance of Resource with no attributes. It is +// Empty returns an instance of Resource with no attributes. It is // equivalent to a `nil` Resource. func Empty() *Resource { return &emptyResource } +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + // Equivalent returns an object that can be compared for equality -// between two resources. This value is suitable for use as a key in +// between two resources. This value is suitable for use as a key in // a map. -func (r *Resource) Equivalent() label.Distinct { - return r.LabelSet().Equivalent() +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() } -// LabelSet returns the equivalent *label.Set. -func (r *Resource) LabelSet() *label.Set { +// Set returns the equivalent *attribute.Set of this resource's attributes. 
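Roughly how the reworked Merge behaves (a sketch with placeholder attributes): the second argument now wins on key conflicts, and mismatched non-empty schema URLs surface as an error instead of being silently combined:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

func main() {
	base := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceNameKey.String("unknown_service"))
	override := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceNameKey.String("kata-runtime")) // placeholder value

	merged, err := resource.Merge(base, override)
	if err != nil {
		panic(err)
	}
	fmt.Println(merged) // service.name=kata-runtime: the second resource wins

	// Conflicting, non-empty schema URLs return an error and an empty resource.
	_, err = resource.Merge(
		resource.NewWithAttributes("https://example.com/schema/1", semconv.ServiceNameKey.String("a")),
		resource.NewWithAttributes("https://example.com/schema/2", semconv.ServiceNameKey.String("b")),
	)
	fmt.Println(err != nil) // true
}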
+func (r *Resource) Set() *attribute.Set { if r == nil { r = Empty() } - return &r.labels + return &r.attrs } -// MarshalJSON encodes labels as a JSON list of { "Key": "...", "Value": ... } -// pairs in order sorted by key. +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. func (r *Resource) MarshalJSON() ([]byte, error) { if r == nil { r = Empty() } - return r.labels.MarshalJSON() + return r.attrs.MarshalJSON() } // Len returns the number of unique key-values in this Resource. @@ -141,15 +254,13 @@ func (r *Resource) Len() int { if r == nil { return 0 } - return r.labels.Len() + return r.attrs.Len() } -// Encoded returns an encoded representation of the resource by -// applying a label encoder. The result is cached by the underlying -// label set. -func (r *Resource) Encoded(enc label.Encoder) string { +// Encoded returns an encoded representation of the resource. +func (r *Resource) Encoded(enc attribute.Encoder) string { if r == nil { return "" } - return r.labels.Encoded(enc) + return r.attrs.Encoded(enc) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/attributesmap.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/attributesmap.go index fc1aa8f43..b891c8178 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/attributesmap.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/attributesmap.go @@ -17,18 +17,17 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "container/list" - "go.opentelemetry.io/otel/label" - "go.opentelemetry.io/otel/sdk/export/trace" + "go.opentelemetry.io/otel/attribute" ) // attributesMap is a capped map of attributes, holding the most recent attributes. // Eviction is done via a LRU method, the oldest entry is removed to create room for a new entry. -// Updates are allowed and refreshes the usage of the key. +// Updates are allowed and they refresh the usage of the key. // // This is based from https://github.com/hashicorp/golang-lru/blob/master/simplelru/lru.go -// With a subset of the its operations and specific for holding label.KeyValue +// With a subset of the its operations and specific for holding attribute.KeyValue type attributesMap struct { - attributes map[label.Key]*list.Element + attributes map[attribute.Key]*list.Element evictList *list.List droppedCount int capacity int @@ -36,14 +35,14 @@ type attributesMap struct { func newAttributesMap(capacity int) *attributesMap { lm := &attributesMap{ - attributes: make(map[label.Key]*list.Element), + attributes: make(map[attribute.Key]*list.Element), evictList: list.New(), capacity: capacity, } return lm } -func (am *attributesMap) add(kv label.KeyValue) { +func (am *attributesMap) add(kv attribute.KeyValue) { // Check for existing item if ent, ok := am.attributes[kv.Key]; ok { am.evictList.MoveToFront(ent) @@ -62,21 +61,23 @@ func (am *attributesMap) add(kv label.KeyValue) { } } -func (am *attributesMap) toSpanData(sd *trace.SpanData) { +// toKeyValue copies the attributesMap into a slice of attribute.KeyValue and +// returns it. If the map is empty, a nil is returned. +// TODO: Is it more efficient to return a pointer to the slice? 
+func (am *attributesMap) toKeyValue() []attribute.KeyValue { len := am.evictList.Len() if len == 0 { - return + return nil } - attributes := make([]label.KeyValue, 0, len) + attributes := make([]attribute.KeyValue, 0, len) for ent := am.evictList.Back(); ent != nil; ent = ent.Prev() { - if value, ok := ent.Value.(*label.KeyValue); ok { + if value, ok := ent.Value.(*attribute.KeyValue); ok { attributes = append(attributes, *value) } } - sd.Attributes = attributes - sd.DroppedAttributeCount = am.droppedCount + return attributes } // removeOldest removes the oldest item from the cache. @@ -84,7 +85,7 @@ func (am *attributesMap) removeOldest() { ent := am.evictList.Back() if ent != nil { am.evictList.Remove(ent) - kv := ent.Value.(*label.KeyValue) + kv := ent.Value.(*attribute.KeyValue) delete(am.attributes, kv.Key) } } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index cd13733fa..db873c16f 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -22,12 +22,13 @@ import ( "time" "go.opentelemetry.io/otel" - export "go.opentelemetry.io/otel/sdk/export/trace" ) +// Defaults for BatchSpanProcessorOptions. const ( DefaultMaxQueueSize = 2048 DefaultBatchTimeout = 5000 * time.Millisecond + DefaultExportTimeout = 30000 * time.Millisecond DefaultMaxExportBatchSize = 512 ) @@ -44,6 +45,11 @@ type BatchSpanProcessorOptions struct { // The default value of BatchTimeout is 5000 msec. BatchTimeout time.Duration + // ExportTimeout specifies the maximum duration for exporting spans. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + // MaxExportBatchSize is the maximum number of spans to process in a single batch. // If there are more than one batch worth of spans then it processes multiple batches // of spans one batch after the other without any delay. @@ -57,16 +63,16 @@ type BatchSpanProcessorOptions struct { BlockOnQueueFull bool } -// BatchSpanProcessor is a SpanProcessor that batches asynchronously received -// SpanData and sends it to a trace.Exporter when complete. -type BatchSpanProcessor struct { - e export.SpanExporter +// batchSpanProcessor is a SpanProcessor that batches asynchronously-received +// spans and sends them to a trace.Exporter when complete. +type batchSpanProcessor struct { + e SpanExporter o BatchSpanProcessorOptions - queue chan *export.SpanData + queue chan ReadOnlySpan dropped uint32 - batch []*export.SpanData + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer stopWait sync.WaitGroup @@ -74,30 +80,28 @@ type BatchSpanProcessor struct { stopCh chan struct{} } -var _ SpanProcessor = (*BatchSpanProcessor)(nil) +var _ SpanProcessor = (*batchSpanProcessor)(nil) -// NewBatchSpanProcessor creates a new BatchSpanProcessor that will send -// SpanData batches to the exporters with the supplied options. -// -// The returned BatchSpanProcessor needs to be registered with the SDK using -// the RegisterSpanProcessor method for it to process spans. +// NewBatchSpanProcessor creates a new SpanProcessor that will send completed +// span batches to the exporter with the supplied options. // // If the exporter is nil, the span processor will preform no action. 
-func NewBatchSpanProcessor(exporter export.SpanExporter, options ...BatchSpanProcessorOption) *BatchSpanProcessor { +func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { o := BatchSpanProcessorOptions{ BatchTimeout: DefaultBatchTimeout, + ExportTimeout: DefaultExportTimeout, MaxQueueSize: DefaultMaxQueueSize, MaxExportBatchSize: DefaultMaxExportBatchSize, } for _, opt := range options { opt(&o) } - bsp := &BatchSpanProcessor{ + bsp := &batchSpanProcessor{ e: exporter, o: o, - batch: make([]*export.SpanData, 0, o.MaxExportBatchSize), + batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), timer: time.NewTimer(o.BatchTimeout), - queue: make(chan *export.SpanData, o.MaxQueueSize), + queue: make(chan ReadOnlySpan, o.MaxQueueSize), stopCh: make(chan struct{}), } @@ -112,26 +116,31 @@ func NewBatchSpanProcessor(exporter export.SpanExporter, options ...BatchSpanPro } // OnStart method does nothing. -func (bsp *BatchSpanProcessor) OnStart(parent context.Context, sd *export.SpanData) {} +func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} -// OnEnd method enqueues export.SpanData for later processing. -func (bsp *BatchSpanProcessor) OnEnd(sd *export.SpanData) { +// OnEnd method enqueues a ReadOnlySpan for later processing. +func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { // Do not enqueue spans if we are just going to drop them. if bsp.e == nil { return } - bsp.enqueue(sd) + bsp.enqueue(s) } // Shutdown flushes the queue and waits until all spans are processed. // It only executes once. Subsequent call does nothing. -func (bsp *BatchSpanProcessor) Shutdown(ctx context.Context) error { +func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { var err error bsp.stopOnce.Do(func() { wait := make(chan struct{}) go func() { close(bsp.stopCh) bsp.stopWait.Wait() + if bsp.e != nil { + if err := bsp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } close(wait) }() // Wait until the wait group is done or the context is cancelled @@ -145,8 +154,22 @@ func (bsp *BatchSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush exports all ended spans that have not yet been exported. -func (bsp *BatchSpanProcessor) ForceFlush() { - bsp.exportSpans() +func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { + var err error + if bsp.e != nil { + wait := make(chan error) + go func() { + wait <- bsp.exportSpans(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err } func WithMaxQueueSize(size int) BatchSpanProcessorOption { @@ -167,6 +190,12 @@ func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { } } +func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.ExportTimeout = timeout + } +} + func WithBlocking() BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BlockOnQueueFull = true @@ -174,42 +203,62 @@ func WithBlocking() BatchSpanProcessorOption { } // exportSpans is a subroutine of processing and draining the queue. 
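A sketch of wiring up the reworked batch processor, using a throwaway no-op exporter; the exporter type and the timeout values are illustrative, not part of this change:

package main

import (
	"context"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// noopExporter satisfies the SpanExporter interface the processor now expects.
type noopExporter struct{}

func (noopExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { return nil }
func (noopExporter) Shutdown(ctx context.Context) error                                   { return nil }

func main() {
	bsp := sdktrace.NewBatchSpanProcessor(noopExporter{},
		sdktrace.WithMaxQueueSize(1024),
		sdktrace.WithBatchTimeout(2*time.Second),
		sdktrace.WithExportTimeout(10*time.Second), // new option added by this change
		sdktrace.WithBlocking(),
	)

	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))

	// ForceFlush and Shutdown are now context-aware and return errors.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = tp.ForceFlush(ctx)
	_ = tp.Shutdown(ctx)
}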
-func (bsp *BatchSpanProcessor) exportSpans() { +func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { bsp.timer.Reset(bsp.o.BatchTimeout) bsp.batchMutex.Lock() defer bsp.batchMutex.Unlock() - if len(bsp.batch) > 0 { - if err := bsp.e.ExportSpans(context.Background(), bsp.batch); err != nil { - otel.Handle(err) - } + if bsp.o.ExportTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + defer cancel() + } + + if l := len(bsp.batch); l > 0 { + err := bsp.e.ExportSpans(ctx, bsp.batch) + + // A new batch is always created after exporting, even if the batch failed to be exported. + // + // It is up to the exporter to implement any type of retry logic if a batch is failing + // to be exported, since it is specific to the protocol and backend being sent to. bsp.batch = bsp.batch[:0] + + if err != nil { + return err + } } + return nil } // processQueue removes spans from the `queue` channel until processor // is shut down. It calls the exporter in batches of up to MaxExportBatchSize // waiting up to BatchTimeout to form a batch. -func (bsp *BatchSpanProcessor) processQueue() { +func (bsp *batchSpanProcessor) processQueue() { defer bsp.timer.Stop() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for { select { case <-bsp.stopCh: return case <-bsp.timer.C: - bsp.exportSpans() + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } case sd := <-bsp.queue: bsp.batchMutex.Lock() bsp.batch = append(bsp.batch, sd) - shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize + shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize bsp.batchMutex.Unlock() if shouldExport { if !bsp.timer.Stop() { <-bsp.timer.C } - bsp.exportSpans() + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } } } } @@ -217,12 +266,16 @@ func (bsp *BatchSpanProcessor) processQueue() { // drainQueue awaits the any caller that had added to bsp.stopWait // to finish the enqueue, then exports the final batch. -func (bsp *BatchSpanProcessor) drainQueue() { +func (bsp *batchSpanProcessor) drainQueue() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for { select { case sd := <-bsp.queue: if sd == nil { - bsp.exportSpans() + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } return } @@ -232,7 +285,9 @@ func (bsp *BatchSpanProcessor) drainQueue() { bsp.batchMutex.Unlock() if shouldExport { - bsp.exportSpans() + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } } default: close(bsp.queue) @@ -240,8 +295,8 @@ func (bsp *BatchSpanProcessor) drainQueue() { } } -func (bsp *BatchSpanProcessor) enqueue(sd *export.SpanData) { - if !sd.SpanContext.IsSampled() { +func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + if !sd.SpanContext().IsSampled() { return } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/config.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/config.go index 42b1851f8..61a304392 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/config.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/config.go @@ -14,38 +14,55 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" -import ( - "go.opentelemetry.io/otel/sdk/resource" -) - -// Config represents the global tracing configuration. -type Config struct { - // DefaultSampler is the default sampler used when creating new spans. - DefaultSampler Sampler +// SpanLimits represents the limits of a span. 
+type SpanLimits struct { + // AttributeCountLimit is the maximum allowed span attribute count. + AttributeCountLimit int - // IDGenerator is for internal use only. - IDGenerator IDGenerator + // EventCountLimit is the maximum allowed span event count. + EventCountLimit int - // MaxEventsPerSpan is max number of message events per span - MaxEventsPerSpan int + // LinkCountLimit is the maximum allowed span link count. + LinkCountLimit int - // MaxAnnotationEventsPerSpan is max number of attributes per span - MaxAttributesPerSpan int + // AttributePerEventCountLimit is the maximum allowed attribute per span event count. + AttributePerEventCountLimit int - // MaxLinksPerSpan is max number of links per span - MaxLinksPerSpan int + // AttributePerLinkCountLimit is the maximum allowed attribute per span link count. + AttributePerLinkCountLimit int +} - // Resource contains attributes representing an entity that produces telemetry. - Resource *resource.Resource +func (sl *SpanLimits) ensureDefault() { + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } } const ( - // DefaultMaxEventsPerSpan is default max number of message events per span - DefaultMaxEventsPerSpan = 1000 + // DefaultAttributeCountLimit is the default maximum allowed span attribute count. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum allowed span event count. + DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum allowed span link count. + DefaultLinkCountLimit = 128 - // DefaultMaxAttributesPerSpan is default max number of attributes per span - DefaultMaxAttributesPerSpan = 1000 + // DefaultAttributePerEventCountLimit is the default maximum allowed attribute per span event count. + DefaultAttributePerEventCountLimit = 128 - // DefaultMaxLinksPerSpan is default max number of links per span - DefaultMaxLinksPerSpan = 1000 + // DefaultAttributePerLinkCountLimit is the default maximum allowed attribute per span link count. + DefaultAttributePerLinkCountLimit = 128 ) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index fd6ead864..0285e99be 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -15,10 +15,6 @@ /* Package trace contains support for OpenTelemetry distributed tracing. -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. 
*/ diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/event.go similarity index 56% rename from src/runtime/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go rename to src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/event.go index 765c21a28..4eb556c53 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -12,24 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package noop provides noop tracing implementations for tracer and span. -package noop +package trace import ( - "context" + "time" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/attribute" ) -var ( - // Tracer is a noop tracer that starts noop spans. - Tracer trace.Tracer +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string - // Span is a noop Span. - Span trace.Span -) + // Attributes describe the aspects of the event. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int -func init() { - Tracer = trace.NewNoopTracerProvider().Tracer("") - _, Span = Tracer.Start(context.Background(), "") + // Time at which this event was recorded. + Time time.Time } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index e60a421cd..c9e2802ac 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -26,8 +26,18 @@ import ( // IDGenerator allows custom generators for TraceID and SpanID. type IDGenerator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewIDs returns a new trace and span ID. NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewSpanID returns a ID for a new span in the trace with traceID. NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. } type randomIDGenerator struct { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/link.go new file mode 100644 index 000000000..19cfea4ba --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/link.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
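The IDGenerator interface above can still be swapped out; a hypothetical generator backed by crypto/rand might look like the following sketch (the default remains the SDK's random generator):

package main

import (
	"context"
	crand "crypto/rand"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// cryptoIDGenerator is a hypothetical IDGenerator that draws IDs from
// crypto/rand. A production generator should also guarantee the IDs it
// returns are valid (non-zero).
type cryptoIDGenerator struct{}

func (cryptoIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	var sid trace.SpanID
	_, _ = crand.Read(tid[:])
	_, _ = crand.Read(sid[:])
	return tid, sid
}

func (cryptoIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	_, _ = crand.Read(sid[:])
	return sid
}

func main() {
	_ = sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(cryptoIDGenerator{}))
}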
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +type Link struct { + // SpanContext of the linked Span. + SpanContext trace.SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 4c6e6e2ec..3a32445bc 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -16,13 +16,13 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" - export "go.opentelemetry.io/otel/sdk/export/trace" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" ) @@ -31,58 +31,81 @@ const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" ) -// TODO (MrAlias): unify this API option design: -// https://github.com/open-telemetry/opentelemetry-go/issues/536 - -// TracerProviderConfig -type TracerProviderConfig struct { +// tracerProviderConfig +type tracerProviderConfig struct { + // processors contains collection of SpanProcessors that are processing pipeline + // for spans in the trace signal. + // SpanProcessors registered with a TracerProvider and are called at the start + // and end of a Span's lifecycle, and are called in the order they are + // registered. processors []SpanProcessor - config Config -} -type TracerProviderOption func(*TracerProviderConfig) + // sampler is the default sampler used when creating new spans. + sampler Sampler + + // idGenerator is used to generate all Span and Trace IDs when needed. + idGenerator IDGenerator + + // spanLimits defines the attribute, event, and link limits for spans. + spanLimits SpanLimits + + // resource contains attributes representing an entity that produces telemetry. + resource *resource.Resource +} type TracerProvider struct { mu sync.Mutex namedTracer map[instrumentation.Library]*tracer spanProcessors atomic.Value - config atomic.Value // access atomically + sampler Sampler + idGenerator IDGenerator + spanLimits SpanLimits + resource *resource.Resource } var _ trace.TracerProvider = &TracerProvider{} -// NewTracerProvider creates an instance of trace provider. Optional -// parameter configures the provider with common options applicable -// to all tracer instances that will be created by this provider. +// NewTracerProvider returns a new and configured TracerProvider. +// +// By default the returned TracerProvider is configured with: +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. +// +// The passed opts are used to override these default values and configure the +// returned TracerProvider appropriately. 
func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { - o := &TracerProviderConfig{} + o := &tracerProviderConfig{} for _, opt := range opts { - opt(o) + opt.apply(o) } + ensureValidTracerProviderConfig(o) + tp := &TracerProvider{ namedTracer: make(map[instrumentation.Library]*tracer), + sampler: o.sampler, + idGenerator: o.idGenerator, + spanLimits: o.spanLimits, + resource: o.resource, } - tp.config.Store(&Config{ - DefaultSampler: ParentBased(AlwaysSample()), - IDGenerator: defaultIDGenerator(), - MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, - MaxEventsPerSpan: DefaultMaxEventsPerSpan, - MaxLinksPerSpan: DefaultMaxLinksPerSpan, - }) for _, sp := range o.processors { tp.RegisterSpanProcessor(sp) } - tp.ApplyConfig(o.config) - return tp } -// Tracer with the given name. If a tracer for the given name does not exist, -// it is created first. If the name is empty, DefaultTracerName is used. +// Tracer returns a Tracer with the given name and options. If a Tracer for +// the given name and options does not exist it is created, otherwise the +// existing Tracer is returned. +// +// If name is empty, DefaultTracerName is used instead. +// +// This method is safe to be called concurrently. func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { c := trace.NewTracerConfig(opts...) @@ -92,8 +115,9 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } il := instrumentation.Library{ - Name: name, - Version: c.InstrumentationVersion, + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), } t, ok := p.namedTracer[il] if !ok { @@ -126,17 +150,17 @@ func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() - new := spanProcessorStates{} + spss := spanProcessorStates{} old, ok := p.spanProcessors.Load().(spanProcessorStates) if !ok || len(old) == 0 { return } - new = append(new, old...) + spss = append(spss, old...) // stop the span processor if it is started and remove it from the list var stopOnce *spanProcessorState var idx int - for i, sps := range new { + for i, sps := range spss { if sps.sp == s { stopOnce = sps idx = i @@ -144,97 +168,178 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { } if stopOnce != nil { stopOnce.state.Do(func() { - otel.Handle(s.Shutdown(context.Background())) + if err := s.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } }) } - if len(new) > 1 { - copy(new[idx:], new[idx+1:]) + if len(spss) > 1 { + copy(spss[idx:], spss[idx+1:]) } - new[len(new)-1] = nil - new = new[:len(new)-1] + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] - p.spanProcessors.Store(new) + p.spanProcessors.Store(spss) } -// ApplyConfig changes the configuration of the provider. -// If a field in the configuration is empty or nil then its original value is preserved. -func (p *TracerProvider) ApplyConfig(cfg Config) { - p.mu.Lock() - defer p.mu.Unlock() - c := *p.config.Load().(*Config) - if cfg.DefaultSampler != nil { - c.DefaultSampler = cfg.DefaultSampler - } - if cfg.IDGenerator != nil { - c.IDGenerator = cfg.IDGenerator - } - if cfg.MaxEventsPerSpan > 0 { - c.MaxEventsPerSpan = cfg.MaxEventsPerSpan - } - if cfg.MaxAttributesPerSpan > 0 { - c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan +// ForceFlush immediately exports all spans that have not yet been exported for +// all the registered span processors. 
+func (p *TracerProvider) ForceFlush(ctx context.Context) error { + spss, ok := p.spanProcessors.Load().(spanProcessorStates) + if !ok { + return fmt.Errorf("failed to load span processors") } - if cfg.MaxLinksPerSpan > 0 { - c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + if len(spss) == 0 { + return nil } - if cfg.Resource != nil { - c.Resource = cfg.Resource + + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if err := sps.sp.ForceFlush(ctx); err != nil { + return err + } } - p.config.Store(&c) + return nil } -// Shutdown shuts down the span processors in the order they were registered +// Shutdown shuts down the span processors in the order they were registered. func (p *TracerProvider) Shutdown(ctx context.Context) error { spss, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok || len(spss) == 0 { + if !ok { + return fmt.Errorf("failed to load span processors") + } + if len(spss) == 0 { return nil } for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var err error sps.state.Do(func() { - otel.Handle(sps.sp.Shutdown(ctx)) + err = sps.sp.Shutdown(ctx) }) + if err != nil { + return err + } } return nil } +type TracerProviderOption interface { + apply(*tracerProviderConfig) +} + +type traceProviderOptionFunc func(*tracerProviderConfig) + +func (fn traceProviderOptionFunc) apply(cfg *tracerProviderConfig) { + fn(cfg) +} + // WithSyncer registers the exporter with the TracerProvider using a // SimpleSpanProcessor. -func WithSyncer(e export.SpanExporter) TracerProviderOption { +// +// This is not recommended for production use. The synchronous nature of the +// SimpleSpanProcessor that will wrap the exporter make it good for testing, +// debugging, or showing examples of other feature, but it will be slow and +// have a high computation resource usage overhead. The WithBatcher option is +// recommended for production use instead. +func WithSyncer(e SpanExporter) TracerProviderOption { return WithSpanProcessor(NewSimpleSpanProcessor(e)) } // WithBatcher registers the exporter with the TracerProvider using a // BatchSpanProcessor configured with the passed opts. -func WithBatcher(e export.SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { +func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) } // WithSpanProcessor registers the SpanProcessor with a TracerProvider. func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { - return func(opts *TracerProviderConfig) { - opts.processors = append(opts.processors, sp) - } -} - -// WithConfig option sets the configuration to provider. -func WithConfig(config Config) TracerProviderOption { - return func(opts *TracerProviderConfig) { - opts.config = config - } + return traceProviderOptionFunc(func(cfg *tracerProviderConfig) { + cfg.processors = append(cfg.processors, sp) + }) } -// WithResource option attaches a resource to the provider. -// The resource is added to the span when it is started. +// WithResource returns a TracerProviderOption that will configure the +// Resource r as a TracerProvider's Resource. The configured Resource is +// referenced by all the Tracers the TracerProvider creates. It represents the +// entity producing telemetry. +// +// If this option is not used, the TracerProvider will use the +// resource.Default() Resource by default. 
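Putting the provider pieces together, a typical setup under the new API might look like the sketch below, assuming the v1.0.0 Jaeger exporter from go.mod; the endpoint and service name are placeholders:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/jaeger"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

func main() {
	ctx := context.Background()

	// Placeholder collector endpoint; adjust for a real deployment.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://localhost:14268/api/traces")))
	if err != nil {
		log.Fatal(err)
	}

	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceNameKey.String("example-service"))

	// Anything not set here falls back to the documented defaults:
	// ParentBased(AlwaysSample), a random IDGenerator, resource.Default(),
	// and the default SpanLimits.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(res),
	)
	otel.SetTracerProvider(tp)
	defer func() {
		if err := tp.Shutdown(ctx); err != nil {
			log.Print(err)
		}
	}()

	_, span := otel.Tracer("example").Start(ctx, "startup")
	span.End()
}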
func WithResource(r *resource.Resource) TracerProviderOption { - return func(opts *TracerProviderConfig) { - opts.config.Resource = r - } + return traceProviderOptionFunc(func(cfg *tracerProviderConfig) { + var err error + cfg.resource, err = resource.Merge(resource.Environment(), r) + if err != nil { + otel.Handle(err) + } + }) } -// WithIDGenerator option registers an IDGenerator with the TracerProvider. +// WithIDGenerator returns a TracerProviderOption that will configure the +// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator +// is used by the Tracers the TracerProvider creates to generate new Span and +// Trace IDs. +// +// If this option is not used, the TracerProvider will use a random number +// IDGenerator by default. func WithIDGenerator(g IDGenerator) TracerProviderOption { - return func(opts *TracerProviderConfig) { - opts.config.IDGenerator = g + return traceProviderOptionFunc(func(cfg *tracerProviderConfig) { + if g != nil { + cfg.idGenerator = g + } + }) +} + +// WithSampler returns a TracerProviderOption that will configure the Sampler +// s as a TracerProvider's Sampler. The configured Sampler is used by the +// Tracers the TracerProvider creates to make their sampling decisions for the +// Spans they create. +// +// If this option is not used, the TracerProvider will use a +// ParentBased(AlwaysSample) Sampler by default. +func WithSampler(s Sampler) TracerProviderOption { + return traceProviderOptionFunc(func(cfg *tracerProviderConfig) { + if s != nil { + cfg.sampler = s + } + }) +} + +// WithSpanLimits returns a TracerProviderOption that will configure the +// SpanLimits sl as a TracerProvider's SpanLimits. The configured SpanLimits +// are used used by the Tracers the TracerProvider and the Spans they create +// to limit tracing resources used. +// +// If this option is not used, the TracerProvider will use the default +// SpanLimits. +func WithSpanLimits(sl SpanLimits) TracerProviderOption { + return traceProviderOptionFunc(func(cfg *tracerProviderConfig) { + cfg.spanLimits = sl + }) +} + +// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. +func ensureValidTracerProviderConfig(cfg *tracerProviderConfig) { + if cfg.sampler == nil { + cfg.sampler = ParentBased(AlwaysSample()) + } + if cfg.idGenerator == nil { + cfg.idGenerator = defaultIDGenerator() + } + cfg.spanLimits.ensureDefault() + if cfg.resource == nil { + cfg.resource = resource.Default() } } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index e66dd65ec..849248638 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -15,28 +15,39 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( + "context" "encoding/binary" "fmt" - "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) // Sampler decides whether a trace should be sampled and exported. type Sampler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ShouldSample returns a SamplingResult based on a decision made from the + // passed parameters. ShouldSample(parameters SamplingParameters) SamplingResult + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+ + // Description returns information describing the Sampler. Description() string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. } // SamplingParameters contains the values passed to a Sampler. type SamplingParameters struct { - ParentContext trace.SpanContext - TraceID trace.TraceID - Name string - HasRemoteParent bool - Kind trace.SpanKind - Attributes []label.KeyValue - Links []trace.Link + ParentContext context.Context + TraceID trace.TraceID + Name string + Kind trace.SpanKind + Attributes []attribute.KeyValue + Links []trace.Link } // SamplingDecision indicates whether a span is dropped, recorded and/or sampled. @@ -56,10 +67,11 @@ const ( RecordAndSample ) -// SamplingResult conveys a SamplingDecision and a set of Attributes. +// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. type SamplingResult struct { Decision SamplingDecision - Attributes []label.KeyValue + Attributes []attribute.KeyValue + Tracestate trace.TraceState } type traceIDRatioSampler struct { @@ -68,11 +80,18 @@ type traceIDRatioSampler struct { } func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 if x < ts.traceIDUpperBound { - return SamplingResult{Decision: RecordAndSample} + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: psc.TraceState(), + } + } + return SamplingResult{ + Decision: Drop, + Tracestate: psc.TraceState(), } - return SamplingResult{Decision: Drop} } func (ts traceIDRatioSampler) Description() string { @@ -83,7 +102,7 @@ func (ts traceIDRatioSampler) Description() string { // always sample. Fractions < 0 are treated as zero. To respect the // parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used // as a delegate of a `Parent` sampler. 
-//nolint:golint // golint complains about stutter of `trace.TraceIDRatioBased` +//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { if fraction >= 1 { return AlwaysSample() @@ -102,7 +121,10 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{Decision: RecordAndSample} + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } } func (as alwaysOnSampler) Description() string { @@ -120,7 +142,10 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{Decision: Drop} + return SamplingResult{ + Decision: Drop, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } } func (as alwaysOffSampler) Description() string { @@ -150,11 +175,11 @@ func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { type parentBased struct { root Sampler - config config + config samplerConfig } -func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) config { - c := config{ +func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { + c := samplerConfig{ remoteParentSampled: AlwaysSample(), remoteParentNotSampled: NeverSample(), localParentSampled: AlwaysSample(), @@ -162,21 +187,21 @@ func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) config } for _, so := range samplers { - so.Apply(&c) + so.apply(&c) } return c } -// config is a group of options for parentBased sampler. -type config struct { +// samplerConfig is a group of options for parentBased sampler. +type samplerConfig struct { remoteParentSampled, remoteParentNotSampled Sampler localParentSampled, localParentNotSampled Sampler } // ParentBasedSamplerOption configures the sampler for a particular sampling case. type ParentBasedSamplerOption interface { - Apply(*config) + apply(*samplerConfig) } // WithRemoteParentSampled sets the sampler for the case of sampled remote parent. 
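A sketch of configuring the reworked samplers; the sampling ratio, the delegate choices, and the attribute limit below are arbitrary:

package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Sample 25% of root traces; honour a sampled remote parent, but drop
	// spans whose remote parent was not sampled.
	sampler := sdktrace.ParentBased(
		sdktrace.TraceIDRatioBased(0.25),
		sdktrace.WithRemoteParentSampled(sdktrace.AlwaysSample()),
		sdktrace.WithRemoteParentNotSampled(sdktrace.NeverSample()),
	)

	// WithSampler and WithSpanLimits replace the removed Config/WithConfig
	// plumbing; unset SpanLimits fields fall back to the defaults.
	_ = sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sampler),
		sdktrace.WithSpanLimits(sdktrace.SpanLimits{AttributeCountLimit: 64}),
	)
}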
@@ -188,7 +213,7 @@ type remoteParentSampledOption struct { s Sampler } -func (o remoteParentSampledOption) Apply(config *config) { +func (o remoteParentSampledOption) apply(config *samplerConfig) { config.remoteParentSampled = o.s } @@ -202,7 +227,7 @@ type remoteParentNotSampledOption struct { s Sampler } -func (o remoteParentNotSampledOption) Apply(config *config) { +func (o remoteParentNotSampledOption) apply(config *samplerConfig) { config.remoteParentNotSampled = o.s } @@ -215,7 +240,7 @@ type localParentSampledOption struct { s Sampler } -func (o localParentSampledOption) Apply(config *config) { +func (o localParentSampledOption) apply(config *samplerConfig) { config.localParentSampled = o.s } @@ -229,20 +254,21 @@ type localParentNotSampledOption struct { s Sampler } -func (o localParentNotSampledOption) Apply(config *config) { +func (o localParentNotSampledOption) apply(config *samplerConfig) { config.localParentNotSampled = o.s } func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { - if p.ParentContext.IsValid() { - if p.HasRemoteParent { - if p.ParentContext.IsSampled() { + psc := trace.SpanContextFromContext(p.ParentContext) + if psc.IsValid() { + if psc.IsRemote() { + if psc.IsSampled() { return pb.config.remoteParentSampled.ShouldSample(p) } return pb.config.remoteParentNotSampled.ShouldSample(p) } - if p.ParentContext.IsSampled() { + if psc.IsSampled() { return pb.config.localParentSampled.ShouldSample(p) } return pb.config.localParentNotSampled.ShouldSample(p) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index c60e99035..9dc87f893 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -16,46 +16,88 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "sync" "go.opentelemetry.io/otel" - export "go.opentelemetry.io/otel/sdk/export/trace" ) -// SimpleSpanProcessor is a SpanProcessor that synchronously sends all -// SpanData to a trace.Exporter when the span finishes. -type SimpleSpanProcessor struct { - e export.SpanExporter +// simpleSpanProcessor is a SpanProcessor that synchronously sends all +// completed Spans to a trace.Exporter immediately. +type simpleSpanProcessor struct { + exporterMu sync.RWMutex + exporter SpanExporter + stopOnce sync.Once } -var _ SpanProcessor = (*SimpleSpanProcessor)(nil) +var _ SpanProcessor = (*simpleSpanProcessor)(nil) -// NewSimpleSpanProcessor returns a new SimpleSpanProcessor that will -// synchronously send SpanData to the exporter. -func NewSimpleSpanProcessor(exporter export.SpanExporter) *SimpleSpanProcessor { - ssp := &SimpleSpanProcessor{ - e: exporter, +// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously +// send completed spans to the exporter immediately. +// +// This SpanProcessor is not recommended for production use. The synchronous +// nature of this SpanProcessor make it good for testing, debugging, or +// showing examples of other feature, but it will be slow and have a high +// computation resource usage overhead. The BatchSpanProcessor is recommended +// for production use instead. +func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { + ssp := &simpleSpanProcessor{ + exporter: exporter, } return ssp } -// OnStart method does nothing. 
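The synchronous path above is still available for tests and examples. A minimal sketch of an exporter that reads fields off the finished spans (the ReadOnlySpan accessors used here are detailed further below in snapshot.go and span.go):

package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// printExporter records completed spans synchronously; as the comment on
// NewSimpleSpanProcessor notes, this is only suitable for tests or examples.
type printExporter struct{}

func (printExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
	for _, s := range spans {
		// ReadOnlySpan gives read access to the finished span's state.
		fmt.Printf("%s %s\n", s.SpanContext().TraceID(), s.Name())
	}
	return nil
}

func (printExporter) Shutdown(ctx context.Context) error { return nil }

func main() {
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(printExporter{}))
	defer tp.Shutdown(context.Background())

	_, span := tp.Tracer("example").Start(context.Background(), "hello")
	span.End() // exported immediately by the simple span processor
}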
-func (ssp *SimpleSpanProcessor) OnStart(parent context.Context, sd *export.SpanData) { -} +// OnStart does nothing. +func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} + +// OnEnd immediately exports a ReadOnlySpan. +func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { + ssp.exporterMu.RLock() + defer ssp.exporterMu.RUnlock() -// OnEnd method exports SpanData using associated export. -func (ssp *SimpleSpanProcessor) OnEnd(sd *export.SpanData) { - if ssp.e != nil && sd.SpanContext.IsSampled() { - if err := ssp.e.ExportSpans(context.Background(), []*export.SpanData{sd}); err != nil { + if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { + if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { otel.Handle(err) } } } -// Shutdown method does nothing. There is no data to cleanup. -func (ssp *SimpleSpanProcessor) Shutdown(_ context.Context) error { - return nil +// Shutdown shuts down the exporter this SimpleSpanProcessor exports to. +func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { + var err error + ssp.stopOnce.Do(func() { + stopFunc := func(exp SpanExporter) (<-chan error, func()) { + done := make(chan error) + return done, func() { done <- exp.Shutdown(ctx) } + } + + // The exporter field of the simpleSpanProcessor needs to be zeroed to + // signal it is shut down, meaning all subsequent calls to OnEnd will + // be gracefully ignored. This needs to be done synchronously to avoid + // any race condition. + // + // A closure is used to keep reference to the exporter and then the + // field is zeroed. This ensures the simpleSpanProcessor is shut down + // before the exporter. This order is important as it avoids a + // potential deadlock. If the exporter shut down operation generates a + // span, that span would need to be exported. Meaning, OnEnd would be + // called and try acquiring the lock that is held here. + ssp.exporterMu.Lock() + done, shutdown := stopFunc(ssp.exporter) + ssp.exporter = nil + ssp.exporterMu.Unlock() + + go shutdown() + + select { + case err = <-done: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err } // ForceFlush does nothing as there is no data to flush. -func (ssp *SimpleSpanProcessor) ForceFlush() { +func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { + return nil } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go new file mode 100644 index 000000000..68670bb0b --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" +) + +// snapshot is an record of a spans state at a particular checkpointed time. 
+// It is used as a read-only representation of that state. +type snapshot struct { + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []Event + links []Link + status Status + childSpanCount int + droppedAttributeCount int + droppedEventCount int + droppedLinkCount int + resource *resource.Resource + instrumentationLibrary instrumentation.Library +} + +var _ ReadOnlySpan = snapshot{} + +func (s snapshot) private() {} + +// Name returns the name of the span. +func (s snapshot) Name() string { + return s.name +} + +// SpanContext returns the unique SpanContext that identifies the span. +func (s snapshot) SpanContext() trace.SpanContext { + return s.spanContext +} + +// Parent returns the unique SpanContext that identifies the parent of the +// span if one exists. If the span has no parent the returned SpanContext +// will be invalid. +func (s snapshot) Parent() trace.SpanContext { + return s.parent +} + +// SpanKind returns the role the span plays in a Trace. +func (s snapshot) SpanKind() trace.SpanKind { + return s.spanKind +} + +// StartTime returns the time the span started recording. +func (s snapshot) StartTime() time.Time { + return s.startTime +} + +// EndTime returns the time the span stopped recording. It will be zero if +// the span has not ended. +func (s snapshot) EndTime() time.Time { + return s.endTime +} + +// Attributes returns the defining attributes of the span. +func (s snapshot) Attributes() []attribute.KeyValue { + return s.attributes +} + +// Links returns all the links the span has to other spans. +func (s snapshot) Links() []Link { + return s.links +} + +// Events returns all the events that occurred within in the spans +// lifetime. +func (s snapshot) Events() []Event { + return s.events +} + +// Status returns the spans status. +func (s snapshot) Status() Status { + return s.status +} + +// InstrumentationLibrary returns information about the instrumentation +// library that created the span. +func (s snapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationLibrary +} + +// Resource returns information about the entity that produced the span. +func (s snapshot) Resource() *resource.Resource { + return s.resource +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s snapshot) DroppedAttributes() int { + return s.droppedAttributeCount +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s snapshot) DroppedLinks() int { + return s.droppedLinkCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s snapshot) DroppedEvents() int { + return s.droppedEventCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. 
+func (s snapshot) ChildSpanCount() int { + return s.childSpanCount +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index fb3c6382c..cda164440 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -16,158 +16,294 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - "errors" "fmt" "reflect" + "runtime" + rt "runtime/trace" "sync" "time" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/label" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" "go.opentelemetry.io/otel/trace" - export "go.opentelemetry.io/otel/sdk/export/trace" + "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" + "go.opentelemetry.io/otel/sdk/resource" ) -const ( - errorTypeKey = label.Key("error.type") - errorMessageKey = label.Key("error.message") - errorEventName = "error" -) +// ReadOnlySpan allows reading information from the data structure underlying a +// trace.Span. It is used in places where reading information from a span is +// necessary but changing the span isn't necessary or allowed. +// +// Warning: methods may be added to this interface in minor releases. +type ReadOnlySpan interface { + // Name returns the name of the span. + Name() string + // SpanContext returns the unique SpanContext that identifies the span. + SpanContext() trace.SpanContext + // Parent returns the unique SpanContext that identifies the parent of the + // span if one exists. If the span has no parent the returned SpanContext + // will be invalid. + Parent() trace.SpanContext + // SpanKind returns the role the span plays in a Trace. + SpanKind() trace.SpanKind + // StartTime returns the time the span started recording. + StartTime() time.Time + // EndTime returns the time the span stopped recording. It will be zero if + // the span has not ended. + EndTime() time.Time + // Attributes returns the defining attributes of the span. + Attributes() []attribute.KeyValue + // Links returns all the links the span has to other spans. + Links() []Link + // Events returns all the events that occurred within in the spans + // lifetime. + Events() []Event + // Status returns the spans status. + Status() Status + // InstrumentationLibrary returns information about the instrumentation + // library that created the span. + InstrumentationLibrary() instrumentation.Library + // Resource returns information about the entity that produced the span. + Resource() *resource.Resource + // DroppedAttributes returns the number of attributes dropped by the span + // due to limits being reached. + DroppedAttributes() int + // DroppedLinks returns the number of links dropped by the span due to + // limits being reached. + DroppedLinks() int + // DroppedEvents returns the number of events dropped by the span due to + // limits being reached. + DroppedEvents() int + // ChildSpanCount returns the count of spans that consider the span a + // direct parent. + ChildSpanCount() int + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() +} -var emptySpanContext = trace.SpanContext{} +// ReadWriteSpan exposes the same methods as trace.Span and in addition allows +// reading information from the underlying data structure. 
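A ReadOnlySpan is what processors and exporters receive once a span ends. As a rough sketch (assuming the SpanExporter interface defined later in this patch; nameExporter is a made-up name), an exporter that only reads the snapshot might look like this:

package debugtrace

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// nameExporter is a toy SpanExporter that only reads from the ReadOnlySpan
// values handed to it; it never mutates them.
type nameExporter struct{}

var _ sdktrace.SpanExporter = nameExporter{}

func (nameExporter) ExportSpans(_ context.Context, spans []sdktrace.ReadOnlySpan) error {
	for _, s := range spans {
		fmt.Printf("%s %s (%d attrs dropped)\n",
			s.SpanContext().SpanID(), s.Name(), s.DroppedAttributes())
	}
	return nil
}

func (nameExporter) Shutdown(context.Context) error { return nil }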
+// This interface exposes the union of the methods of trace.Span (which is a +// "write-only" span) and ReadOnlySpan. New methods for writing or reading span +// information should be added under trace.Span or ReadOnlySpan, respectively. +// +// Warning: methods may be added to this interface in minor releases. +type ReadWriteSpan interface { + trace.Span + ReadOnlySpan +} -// span is an implementation of the OpenTelemetry Span API representing the -// individual component of a trace. -type span struct { +// recordingSpan is an implementation of the OpenTelemetry Span API +// representing the individual component of a trace that is sampled. +type recordingSpan struct { // mu protects the contents of this span. mu sync.Mutex - // data contains information recorded about the span. - // - // It will be non-nil if we are exporting the span or recording events for it. - // Otherwise, data is nil, and the span is simply a carrier for the - // SpanContext, so that the trace ID is propagated. - data *export.SpanData + // parent holds the parent span of this span as a trace.SpanContext. + parent trace.SpanContext + + // spanKind represents the kind of this span as a trace.SpanKind. + spanKind trace.SpanKind + + // name is the name of this span. + name string + + // startTime is the time at which this span was started. + startTime time.Time + + // endTime is the time at which this span was ended. It contains the zero + // value of time.Time until the span is ended. + endTime time.Time + + // status is the status of this span. + status Status + + // childSpanCount holds the number of child spans created for this span. + childSpanCount int + + // resource contains attributes representing an entity that produced this + // span. + resource *resource.Resource + + // instrumentationLibrary defines the instrumentation library used to + // provide instrumentation. + instrumentationLibrary instrumentation.Library + + // spanContext holds the SpanContext of this span. spanContext trace.SpanContext - // attributes are capped at configured limit. When the capacity is reached an oldest entry - // is removed to create room for a new entry. + // attributes are capped at configured limit. When the capacity is reached + // an oldest entry is removed to create room for a new entry. attributes *attributesMap - // messageEvents are stored in FIFO queue capped by configured limit. - messageEvents *evictedQueue + // events are stored in FIFO queue capped by configured limit. + events *evictedQueue // links are stored in FIFO queue capped by configured limit. links *evictedQueue - // endOnce ensures End is only called once. - endOnce sync.Once - // executionTracerTaskEnd ends the execution tracer span. executionTracerTaskEnd func() // tracer is the SDK tracer that created this span. tracer *tracer + + // spanLimits holds the limits to this span. + spanLimits SpanLimits } -var _ trace.Span = &span{} +var _ ReadWriteSpan = (*recordingSpan)(nil) +var _ runtimeTracer = (*recordingSpan)(nil) -func (s *span) SpanContext() trace.SpanContext { +// SpanContext returns the SpanContext of this span. +func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { return trace.SpanContext{} } return s.spanContext } -func (s *span) IsRecording() bool { +// IsRecording returns if this span is being recorded. If this span has ended +// this will return false. 
+func (s *recordingSpan) IsRecording() bool { if s == nil { return false } - return s.data != nil + s.mu.Lock() + defer s.mu.Unlock() + + return s.endTime.IsZero() } -func (s *span) SetStatus(code codes.Code, msg string) { - if s == nil { - return - } +// SetStatus sets the status of the Span in the form of a code and a +// description, overriding previous values set. The description is only +// included in the set status when the code is for an error. If this span is +// not being recorded than this method does nothing. +func (s *recordingSpan) SetStatus(code codes.Code, description string) { if !s.IsRecording() { return } + + status := Status{Code: code} + if code == codes.Error { + status.Description = description + } + s.mu.Lock() - s.data.StatusCode = code - s.data.StatusMessage = msg + s.status = status s.mu.Unlock() } -func (s *span) SetAttributes(attributes ...label.KeyValue) { +// SetAttributes sets attributes of this span. +// +// If a key from attributes already exists the value associated with that key +// will be overwritten with the value contained in attributes. +// +// If this span is not being recorded than this method does nothing. +func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { if !s.IsRecording() { return } s.copyToCappedAttributes(attributes...) } -// End ends the span. +// End ends the span. This method does nothing if the span is already ended or +// is not being recorded. // // The only SpanOption currently supported is WithTimestamp which will set the // end time for a Span's life-cycle. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. -func (s *span) End(options ...trace.SpanOption) { +func (s *recordingSpan) End(options ...trace.SpanEndOption) { + // Do not start by checking if the span is being recorded which requires + // acquiring a lock. Make a minimal check that the span is not nil. if s == nil { return } + // Store the end time as soon as possible to avoid artificially increasing + // the span's duration in case some operation below takes a while. + et := internal.MonotonicEndTime(s.startTime) + + // Do relative expensive check now that we have an end time and see if we + // need to do any more processing. + if !s.IsRecording() { + return + } + + config := trace.NewSpanEndConfig(options...) if recovered := recover(); recovered != nil { // Record but don't stop the panic. defer panic(recovered) - s.addEvent( - errorEventName, + opts := []trace.EventOption{ trace.WithAttributes( - errorTypeKey.String(typeStr(recovered)), - errorMessageKey.String(fmt.Sprint(recovered)), + semconv.ExceptionTypeKey.String(typeStr(recovered)), + semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)), ), - ) + } + + if config.StackTrace() { + opts = append(opts, trace.WithAttributes( + semconv.ExceptionStacktraceKey.String(recordStackTrace()), + )) + } + + s.addEvent(semconv.ExceptionEventName, opts...) } if s.executionTracerTaskEnd != nil { s.executionTracerTaskEnd() } - if !s.IsRecording() { - return + + s.mu.Lock() + // Setting endTime to non-zero marks the span as ended and not recording. + if config.Timestamp().IsZero() { + s.endTime = et + } else { + s.endTime = config.Timestamp() } - config := trace.NewSpanConfig(options...) 
- s.endOnce.Do(func() { - sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates) - mustExportOrProcess := ok && len(sps) > 0 - if mustExportOrProcess { - sd := s.makeSpanData() - if config.Timestamp.IsZero() { - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - } else { - sd.EndTime = config.Timestamp - } - for _, sp := range sps { - sp.sp.OnEnd(sd) - } + s.mu.Unlock() + + if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok { + if len(sps) == 0 { + return } - }) + snap := s.snapshot() + for _, sp := range sps { + sp.sp.OnEnd(snap) + } + } } -func (s *span) RecordError(err error, opts ...trace.EventOption) { +// RecordError will record err as a span event for this span. An additional call to +// SetStatus is required if the Status of the Span should be set to Error, this method +// does not change the Span status. If this span is not being recorded or err is nil +// than this method does nothing. +func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { if s == nil || err == nil || !s.IsRecording() { return } - s.SetStatus(codes.Error, "") opts = append(opts, trace.WithAttributes( - errorTypeKey.String(typeStr(err)), - errorMessageKey.String(err.Error()), + semconv.ExceptionTypeKey.String(typeStr(err)), + semconv.ExceptionMessageKey.String(err.Error()), )) - s.addEvent(errorEventName, opts...) + + c := trace.NewEventConfig(opts...) + if c.StackTrace() { + opts = append(opts, trace.WithAttributes( + semconv.ExceptionStacktraceKey.String(recordStackTrace()), + )) + } + + s.addEvent(semconv.ExceptionEventName, opts...) } func typeStr(i interface{}) string { @@ -179,220 +315,341 @@ func typeStr(i interface{}) string { return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) } -func (s *span) Tracer() trace.Tracer { - return s.tracer +func recordStackTrace() string { + stackTrace := make([]byte, 2048) + n := runtime.Stack(stackTrace, false) + + return string(stackTrace[0:n]) } -func (s *span) AddEvent(name string, o ...trace.EventOption) { +// AddEvent adds an event with the provided name and options. If this span is +// not being recorded than this method does nothing. +func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { if !s.IsRecording() { return } s.addEvent(name, o...) } -func (s *span) addEvent(name string, o ...trace.EventOption) { +func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) + // Discard over limited attributes + attributes := c.Attributes() + var discarded int + if len(attributes) > s.spanLimits.AttributePerEventCountLimit { + discarded = len(attributes) - s.spanLimits.AttributePerEventCountLimit + attributes = attributes[:s.spanLimits.AttributePerEventCountLimit] + } s.mu.Lock() defer s.mu.Unlock() - s.messageEvents.add(export.Event{ - Name: name, - Attributes: c.Attributes, - Time: c.Timestamp, + s.events.add(Event{ + Name: name, + Attributes: attributes, + DroppedAttributeCount: discarded, + Time: c.Timestamp(), }) } -var errUninitializedSpan = errors.New("failed to set name on uninitialized span") +// SetName sets the name of this span. If this span is not being recorded than +// this method does nothing. 
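The RecordError comment above calls out that recording an error no longer sets the span status. A short sketch of the resulting calling pattern (recordFailure is illustrative; WithStackTrace is assumed to be the trace option behind the StackTrace() checks above):

package debugtrace

import (
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// recordFailure records err as an exception event and then sets the error
// status explicitly, since RecordError alone leaves the status untouched.
func recordFailure(span trace.Span, err error) {
	if err == nil {
		return
	}
	span.RecordError(err, trace.WithStackTrace(true))
	span.SetStatus(codes.Error, err.Error())
}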
+func (s *recordingSpan) SetName(name string) { + if !s.IsRecording() { + return + } -func (s *span) SetName(name string) { s.mu.Lock() defer s.mu.Unlock() + s.name = name +} - if s.data == nil { - otel.Handle(errUninitializedSpan) - return - } - s.data.Name = name - // SAMPLING - noParent := !s.data.ParentSpanID.IsValid() - var ctx trace.SpanContext - if noParent { - ctx = trace.SpanContext{} - } else { - // FIXME: Where do we get the parent context from? - ctx = s.data.SpanContext +// Name returns the name of this span. +func (s *recordingSpan) Name() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.name +} + +// Name returns the SpanContext of this span's parent span. +func (s *recordingSpan) Parent() trace.SpanContext { + s.mu.Lock() + defer s.mu.Unlock() + return s.parent +} + +// SpanKind returns the SpanKind of this span. +func (s *recordingSpan) SpanKind() trace.SpanKind { + s.mu.Lock() + defer s.mu.Unlock() + return s.spanKind +} + +// StartTime returns the time this span started. +func (s *recordingSpan) StartTime() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.startTime +} + +// EndTime returns the time this span ended. For spans that have not yet +// ended, the returned value will be the zero value of time.Time. +func (s *recordingSpan) EndTime() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.endTime +} + +// Attributes returns the attributes of this span. +func (s *recordingSpan) Attributes() []attribute.KeyValue { + s.mu.Lock() + defer s.mu.Unlock() + if s.attributes.evictList.Len() == 0 { + return []attribute.KeyValue{} } - data := samplingData{ - noParent: noParent, - remoteParent: s.data.HasRemoteParent, - parent: ctx, - name: name, - cfg: s.tracer.provider.config.Load().(*Config), - span: s, - attributes: s.data.Attributes, - links: s.data.Links, - kind: s.data.SpanKind, + return s.attributes.toKeyValue() +} + +// Links returns the links of this span. +func (s *recordingSpan) Links() []Link { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.links.queue) == 0 { + return []Link{} } - sampled := makeSamplingDecision(data) + return s.interfaceArrayToLinksArray() +} - // Adding attributes directly rather than using s.SetAttributes() - // as s.mu is already locked and attempting to do so would deadlock. - for _, a := range sampled.Attributes { - s.attributes.add(a) +// Events returns the events of this span. +func (s *recordingSpan) Events() []Event { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.events.queue) == 0 { + return []Event{} } + return s.interfaceArrayToEventArray() +} + +// Status returns the status of this span. +func (s *recordingSpan) Status() Status { + s.mu.Lock() + defer s.mu.Unlock() + return s.status +} + +// InstrumentationLibrary returns the instrumentation.Library associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { + s.mu.Lock() + defer s.mu.Unlock() + return s.instrumentationLibrary } -func (s *span) addLink(link trace.Link) { +// Resource returns the Resource associated with the Tracer that created this +// span. 
+func (s *recordingSpan) Resource() *resource.Resource { + s.mu.Lock() + defer s.mu.Unlock() + return s.resource +} + +func (s *recordingSpan) addLink(link trace.Link) { if !s.IsRecording() { return } s.mu.Lock() defer s.mu.Unlock() - s.links.add(link) + + var droppedAttributeCount int + + // Discard over limited attributes + if len(link.Attributes) > s.spanLimits.AttributePerLinkCountLimit { + droppedAttributeCount = len(link.Attributes) - s.spanLimits.AttributePerLinkCountLimit + link.Attributes = link.Attributes[:s.spanLimits.AttributePerLinkCountLimit] + } + + s.links.add(Link{link.SpanContext, link.Attributes, droppedAttributeCount}) } -// makeSpanData produces a SpanData representing the current state of the span. -// It requires that s.data is non-nil. -func (s *span) makeSpanData() *export.SpanData { - var sd export.SpanData +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *recordingSpan) DroppedAttributes() int { s.mu.Lock() defer s.mu.Unlock() - sd = *s.data + return s.attributes.droppedCount +} - s.attributes.toSpanData(&sd) +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s *recordingSpan) DroppedLinks() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.links.droppedCount +} - if len(s.messageEvents.queue) > 0 { - sd.MessageEvents = s.interfaceArrayToMessageEventArray() - sd.DroppedMessageEventCount = s.messageEvents.droppedCount +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s *recordingSpan) DroppedEvents() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.events.droppedCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. +func (s *recordingSpan) ChildSpanCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.childSpanCount +} + +// TracerProvider returns a trace.TracerProvider that can be used to generate +// additional Spans on the same telemetry pipeline as the current Span. +func (s *recordingSpan) TracerProvider() trace.TracerProvider { + return s.tracer.provider +} + +// snapshot creates a read-only copy of the current state of the span. 
+func (s *recordingSpan) snapshot() ReadOnlySpan { + var sd snapshot + s.mu.Lock() + defer s.mu.Unlock() + + sd.endTime = s.endTime + sd.instrumentationLibrary = s.instrumentationLibrary + sd.name = s.name + sd.parent = s.parent + sd.resource = s.resource + sd.spanContext = s.spanContext + sd.spanKind = s.spanKind + sd.startTime = s.startTime + sd.status = s.status + sd.childSpanCount = s.childSpanCount + + if s.attributes.evictList.Len() > 0 { + sd.attributes = s.attributes.toKeyValue() + sd.droppedAttributeCount = s.attributes.droppedCount + } + if len(s.events.queue) > 0 { + sd.events = s.interfaceArrayToEventArray() + sd.droppedEventCount = s.events.droppedCount } if len(s.links.queue) > 0 { - sd.Links = s.interfaceArrayToLinksArray() - sd.DroppedLinkCount = s.links.droppedCount + sd.links = s.interfaceArrayToLinksArray() + sd.droppedLinkCount = s.links.droppedCount } return &sd } -func (s *span) interfaceArrayToLinksArray() []trace.Link { - linkArr := make([]trace.Link, 0) +func (s *recordingSpan) interfaceArrayToLinksArray() []Link { + linkArr := make([]Link, 0) for _, value := range s.links.queue { - linkArr = append(linkArr, value.(trace.Link)) + linkArr = append(linkArr, value.(Link)) } return linkArr } -func (s *span) interfaceArrayToMessageEventArray() []export.Event { - messageEventArr := make([]export.Event, 0) - for _, value := range s.messageEvents.queue { - messageEventArr = append(messageEventArr, value.(export.Event)) +func (s *recordingSpan) interfaceArrayToEventArray() []Event { + eventArr := make([]Event, 0) + for _, value := range s.events.queue { + eventArr = append(eventArr, value.(Event)) } - return messageEventArr + return eventArr } -func (s *span) copyToCappedAttributes(attributes ...label.KeyValue) { +func (s *recordingSpan) copyToCappedAttributes(attributes ...attribute.KeyValue) { s.mu.Lock() defer s.mu.Unlock() for _, a := range attributes { - if a.Value.Type() != label.INVALID { + // Ensure attributes conform to the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes + if a.Valid() { s.attributes.add(a) } } } -func (s *span) addChild() { +func (s *recordingSpan) addChild() { if !s.IsRecording() { return } s.mu.Lock() - s.data.ChildSpanCount++ + s.childSpanCount++ s.mu.Unlock() } -func startSpanInternal(ctx context.Context, tr *tracer, name string, parent trace.SpanContext, remoteParent bool, o *trace.SpanConfig) *span { - var noParent bool - span := &span{} - span.spanContext = parent - - cfg := tr.provider.config.Load().(*Config) +func (*recordingSpan) private() {} - if parent == emptySpanContext { - // Generate both TraceID and SpanID - span.spanContext.TraceID, span.spanContext.SpanID = cfg.IDGenerator.NewIDs(ctx) - noParent = true - } else { - // TraceID already exists, just generate a SpanID - span.spanContext.SpanID = cfg.IDGenerator.NewSpanID(ctx, parent.TraceID) - } - data := samplingData{ - noParent: noParent, - remoteParent: remoteParent, - parent: parent, - name: name, - cfg: cfg, - span: span, - attributes: o.Attributes, - links: o.Links, - kind: o.SpanKind, +// runtimeTrace starts a "runtime/trace".Task for the span and returns a +// context containing the task. +func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + if !rt.IsEnabled() { + // Avoid additional overhead if runtime/trace is not enabled. 
+ return ctx } - sampled := makeSamplingDecision(data) + nctx, task := rt.NewTask(ctx, s.name) - if !span.spanContext.IsSampled() && !o.Record { - return span - } + s.mu.Lock() + s.executionTracerTaskEnd = task.End + s.mu.Unlock() - startTime := o.Timestamp - if startTime.IsZero() { - startTime = time.Now() - } - span.data = &export.SpanData{ - SpanContext: span.spanContext, - StartTime: startTime, - SpanKind: trace.ValidateSpanKind(o.SpanKind), - Name: name, - HasRemoteParent: remoteParent, - Resource: cfg.Resource, - InstrumentationLibrary: tr.instrumentationLibrary, - } - span.attributes = newAttributesMap(cfg.MaxAttributesPerSpan) - span.messageEvents = newEvictedQueue(cfg.MaxEventsPerSpan) - span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + return nctx +} - span.SetAttributes(sampled.Attributes...) +// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API +// that wraps a SpanContext. It performs no operations other than to return +// the wrapped SpanContext or TracerProvider that created it. +type nonRecordingSpan struct { + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +} - if !noParent { - span.data.ParentSpanID = parent.SpanID - } +var _ trace.Span = nonRecordingSpan{} - return span -} - -type samplingData struct { - noParent bool - remoteParent bool - parent trace.SpanContext - name string - cfg *Config - span *span - attributes []label.KeyValue - links []trace.Link - kind trace.SpanKind -} - -func makeSamplingDecision(data samplingData) SamplingResult { - sampler := data.cfg.DefaultSampler - spanContext := &data.span.spanContext - sampled := sampler.ShouldSample(SamplingParameters{ - ParentContext: data.parent, - TraceID: spanContext.TraceID, - Name: data.name, - HasRemoteParent: data.remoteParent, - Kind: data.kind, - Attributes: data.attributes, - Links: data.links, - }) - if sampled.Decision == RecordAndSample { - spanContext.TraceFlags |= trace.FlagsSampled - } else { - spanContext.TraceFlags &^= trace.FlagsSampled - } - return sampled +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +// TracerProvider returns the trace.TracerProvider that provided the Tracer +// that created this span. +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } + +func isRecording(s SamplingResult) bool { + return s.Decision == RecordOnly || s.Decision == RecordAndSample +} + +func isSampled(s SamplingResult) bool { + return s.Decision == RecordAndSample +} + +// Status is the classified state of a Span. +type Status struct { + // Code is an identifier of a Spans state classification. + Code codes.Code + // Description is a user hint about why that status was set. It is only + // applicable when Code is Error. 
+ Description string } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go new file mode 100644 index 000000000..9fb3d6eac --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "context" + +// SpanExporter handles the delivery of spans to external receivers. This is +// the final component in the trace export pipeline. +type SpanExporter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ExportSpans exports a batch of spans. + // + // This function is called synchronously, so there is no concurrency + // safety requirement. However, due to the synchronous calling pattern, + // it is critical that all timeouts and cancellations contained in the + // passed context must be honored. + // + // Any retry logic must be contained in this function. The SDK that + // calls this function will not implement any retry logic. All errors + // returned by this function are considered unrecoverable and will be + // reported to a configured error Handler. + ExportSpans(ctx context.Context, spans []ReadOnlySpan) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The + // exporter is expected to preform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go index 79f0c6191..b649a2ff0 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go @@ -17,31 +17,47 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" - - export "go.opentelemetry.io/otel/sdk/export/trace" ) -// SpanProcessor is interface to add hooks to start and end method invocations. +// SpanProcessor is a processing pipeline for spans in the trace signal. +// SpanProcessors registered with a TracerProvider and are called at the start +// and end of a Span's lifecycle, and are called in the order they are +// registered. type SpanProcessor interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. - // OnStart method is invoked when span is started. 
It is a synchronous call - // and hence should not block. - OnStart(parent context.Context, sd *export.SpanData) + // OnStart is called when a span is started. It is called synchronously + // and should not block. + OnStart(parent context.Context, s ReadWriteSpan) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. - // OnEnd method is invoked when span is finished. It is a synchronous call - // and hence should not block. - OnEnd(sd *export.SpanData) + // OnEnd is called when span is finished. It is called synchronously and + // hence not block. + OnEnd(s ReadOnlySpan) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. - // Shutdown is invoked when SDK shuts down. Use this call to cleanup any processor - // data. No calls to OnStart and OnEnd method is invoked after Shutdown call is - // made. It should not be blocked indefinitely. + // Shutdown is called when the SDK shuts down. Any cleanup or release of + // resources held by the processor should be done in this call. + // + // Calls to OnStart, OnEnd, or ForceFlush after this has been called + // should be ignored. + // + // All timeouts and cancellations contained in ctx must be honored, this + // should not block indefinitely. Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. // ForceFlush exports all ended spans to the configured Exporter that have not yet // been exported. It should only be called when absolutely necessary, such as when // using a FaaS provider that may suspend the process after an invocation, but before // the Processor can export the completed spans. - ForceFlush() + ForceFlush(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. } type spanProcessorState struct { diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index ea90bb65f..afd4b5b5d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -16,8 +16,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "time" - "go.opentelemetry.io/otel/internal/trace/parent" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -34,38 +34,120 @@ var _ trace.Tracer = &tracer{} // // The Span is created with the provided name and as a child of any existing // span context found in the passed context. The created Span will be -// configured appropriately by any SpanOption passed. Any Timestamp option -// passed will be used as the start time of the Span's life-cycle. -func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanOption) (context.Context, trace.Span) { - config := trace.NewSpanConfig(options...) - - parentSpanContext, remoteParent, links := parent.GetSpanContextAndLinks(ctx, config.NewRoot) +// configured appropriately by any SpanOption passed. +func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { + config := trace.NewSpanStartConfig(options...) + // For local spans created by this SDK, track child span count. 
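The SpanProcessor contract above now passes a ReadWriteSpan to OnStart and a context to Shutdown and ForceFlush. A hedged sketch of a trivial processor satisfying the new signatures (countingProcessor is illustrative and not concurrency-safe):

package debugtrace

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// countingProcessor tallies span lifecycle callbacks; it exists only to show
// the updated method signatures.
type countingProcessor struct{ started, ended int }

var _ sdktrace.SpanProcessor = (*countingProcessor)(nil)

func (p *countingProcessor) OnStart(_ context.Context, _ sdktrace.ReadWriteSpan) { p.started++ }
func (p *countingProcessor) OnEnd(_ sdktrace.ReadOnlySpan)                       { p.ended++ }
func (p *countingProcessor) Shutdown(context.Context) error                      { return nil }
func (p *countingProcessor) ForceFlush(context.Context) error                    { return nil }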
if p := trace.SpanFromContext(ctx); p != nil { - if sdkSpan, ok := p.(*span); ok { + if sdkSpan, ok := p.(*recordingSpan); ok { sdkSpan.addChild() } } - span := startSpanInternal(ctx, tr, name, parentSpanContext, remoteParent, config) - for _, l := range links { - span.addLink(l) + s := tr.newSpan(ctx, name, &config) + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { + sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates) + for _, sp := range sps { + sp.sp.OnStart(ctx, rw) + } } - for _, l := range config.Links { - span.addLink(l) + if rtt, ok := s.(runtimeTracer); ok { + ctx = rtt.runtimeTrace(ctx) } - span.SetAttributes(config.Attributes...) - span.tracer = tr + return trace.ContextWithSpan(ctx, s), s +} - if span.IsRecording() { - sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates) - for _, sp := range sps { - sp.sp.OnStart(ctx, span.data) - } +type runtimeTracer interface { + // runtimeTrace starts a "runtime/trace".Task for the span and + // returns a context containing the task. + runtimeTrace(ctx context.Context) context.Context +} + +// newSpan returns a new configured span. +func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { + // If told explicitly to make this a new root use a zero value SpanContext + // as a parent which contains an invalid trace ID and is not remote. + var psc trace.SpanContext + if config.NewRoot() { + ctx = trace.ContextWithSpanContext(ctx, psc) + } else { + psc = trace.SpanContextFromContext(ctx) + } + + // If there is a valid parent trace ID, use it to ensure the continuity of + // the trace. Always generate a new span ID so other components can rely + // on a unique span ID, even if the Span is non-recording. + var tid trace.TraceID + var sid trace.SpanID + if !psc.TraceID().IsValid() { + tid, sid = tr.provider.idGenerator.NewIDs(ctx) + } else { + tid = psc.TraceID() + sid = tr.provider.idGenerator.NewSpanID(ctx, tid) + } + + samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{ + ParentContext: ctx, + TraceID: tid, + Name: name, + Kind: config.SpanKind(), + Attributes: config.Attributes(), + Links: config.Links(), + }) + + scc := trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + TraceState: samplingResult.Tracestate, + } + if isSampled(samplingResult) { + scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled + } else { + scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled + } + sc := trace.NewSpanContext(scc) + + if !isRecording(samplingResult) { + return tr.newNonRecordingSpan(sc) + } + return tr.newRecordingSpan(psc, sc, name, samplingResult, config) +} + +// newRecordingSpan returns a new configured recordingSpan. 
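The newSpan path above takes the parent SpanContext from the context unless WithNewRoot is used, and always generates a fresh span ID. A brief usage sketch (startChildAndRoot is illustrative, not runtime code):

package debugtrace

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// startChildAndRoot shows both branches of newSpan: the first Start inherits
// any parent SpanContext carried by ctx, the second forces a new trace by
// clearing the parent with WithNewRoot.
func startChildAndRoot(ctx context.Context, tr trace.Tracer) {
	ctx, child := tr.Start(ctx, "child-of-ctx")
	defer child.End()

	_, root := tr.Start(ctx, "new-root", trace.WithNewRoot())
	defer root.End()
}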
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { + startTime := config.Timestamp() + if startTime.IsZero() { + startTime = time.Now() } - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - return trace.ContextWithSpan(ctx, span), span + s := &recordingSpan{ + parent: psc, + spanContext: sc, + spanKind: trace.ValidateSpanKind(config.SpanKind()), + name: name, + startTime: startTime, + attributes: newAttributesMap(tr.provider.spanLimits.AttributeCountLimit), + events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + tracer: tr, + spanLimits: tr.provider.spanLimits, + resource: tr.provider.resource, + instrumentationLibrary: tr.instrumentationLibrary, + } + + for _, l := range config.Links() { + s.addLink(l) + } + + s.SetAttributes(sr.Attributes...) + s.SetAttributes(config.Attributes()...) + + return s +} + +// newNonRecordingSpan returns a new configured nonRecordingSpan. +func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { + return nonRecordingSpan{tracer: tr, sc: sc} } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/resource.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/resource.go deleted file mode 100644 index 59a326d27..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/resource.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv" - -import "go.opentelemetry.io/otel/label" - -// Semantic conventions for service resource attribute keys. -const ( - // Name of the service. - ServiceNameKey = label.Key("service.name") - - // A namespace for `service.name`. This needs to have meaning that helps - // to distinguish a group of services. For example, the team name that - // owns a group of services. `service.name` is expected to be unique - // within the same namespace. - ServiceNamespaceKey = label.Key("service.namespace") - - // A unique identifier of the service instance. In conjunction with the - // `service.name` and `service.namespace` this must be unique. - ServiceInstanceIDKey = label.Key("service.instance.id") - - // The version of the service API. - ServiceVersionKey = label.Key("service.version") -) - -// Semantic conventions for telemetry SDK resource attribute keys. -const ( - // The name of the telemetry SDK. - // - // The default OpenTelemetry SDK provided by the OpenTelemetry project - // MUST set telemetry.sdk.name to the value `opentelemetry`. - // - // If another SDK is used, this attribute MUST be set to the import path - // of that SDK's package. - // - // The value `opentelemetry` is reserved and MUST NOT be used by - // non-OpenTelemetry SDKs. - TelemetrySDKNameKey = label.Key("telemetry.sdk.name") - - // The language of the telemetry SDK. 
- TelemetrySDKLanguageKey = label.Key("telemetry.sdk.language") - - // The version string of the telemetry SDK. - TelemetrySDKVersionKey = label.Key("telemetry.sdk.version") -) - -// Semantic conventions for telemetry SDK resource attributes. -var ( - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") -) - -// Semantic conventions for container resource attribute keys. -const ( - // A uniquely identifying name for the Container. - ContainerNameKey = label.Key("container.name") - - // Container ID, usually a UUID, as for example used to - // identify Docker containers. The UUID might be abbreviated. - ContainerIDKey = label.Key("container.id") - - // Name of the image the container was built on. - ContainerImageNameKey = label.Key("container.image.name") - - // Container image tag. - ContainerImageTagKey = label.Key("container.image.tag") -) - -// Semantic conventions for Function-as-a-Service resource attribute keys. -const ( - // A uniquely identifying name for the FaaS. - FaaSNameKey = label.Key("faas.name") - - // The unique name of the function being executed. - FaaSIDKey = label.Key("faas.id") - - // The version of the function being executed. - FaaSVersionKey = label.Key("faas.version") - - // The execution environment identifier. - FaaSInstanceKey = label.Key("faas.instance") -) - -// Semantic conventions for operating system process resource attribute keys. -const ( - // Process identifier (PID). - ProcessPIDKey = label.Key("process.pid") - // The name of the process executable. On Linux based systems, can be - // set to the `Name` in `proc/[pid]/status`. On Windows, can be set to - // the base name of `GetProcessImageFileNameW`. - ProcessExecutableNameKey = label.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can - // be set to the target of `proc/[pid]/exe`. On Windows, can be set to - // the result of `GetProcessImageFileNameW`. - ProcessExecutablePathKey = label.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On - // Linux based systems, can be set to the zeroth string in - // `proc/[pid]/cmdline`. On Windows, can be set to the first parameter - // extracted from `GetCommandLineW`. - ProcessCommandKey = label.Key("process.command") - // The full command used to launch the process. The value can be either - // a list of strings representing the ordered list of arguments, or a - // single string representing the full command. On Linux based systems, - // can be set to the list of null-delimited strings extracted from - // `proc/[pid]/cmdline`. On Windows, can be set to the result of - // `GetCommandLineW`. - ProcessCommandLineKey = label.Key("process.command_line") - // The username of the user that owns the process. - ProcessOwnerKey = label.Key("process.owner") -) - -// Semantic conventions for Kubernetes resource attribute keys. -const ( - // A uniquely identifying name for the Kubernetes cluster. Kubernetes - // does not have cluster names as an internal concept so this may be - // set to any meaningful value within the environment. For example, - // GKE clusters have a name which can be used for this label. - K8SClusterNameKey = label.Key("k8s.cluster.name") - - // The name of the namespace that the pod is running in. - K8SNamespaceNameKey = label.Key("k8s.namespace.name") - - // The uid of the Pod. - K8SPodUIDKey = label.Key("k8s.pod.uid") - - // The name of the pod. 
- K8SPodNameKey = label.Key("k8s.pod.name") - - // The name of the Container in a Pod template. - K8SContainerNameKey = label.Key("k8s.container.name") - - // The uid of the ReplicaSet. - K8SReplicaSetUIDKey = label.Key("k8s.replicaset.uid") - - // The name of the ReplicaSet. - K8SReplicaSetNameKey = label.Key("k8s.replicaset.name") - - // The uid of the Deployment. - K8SDeploymentUIDKey = label.Key("k8s.deployment.uid") - - // The name of the deployment. - K8SDeploymentNameKey = label.Key("k8s.deployment.name") - - // The uid of the StatefulSet. - K8SStatefulSetUIDKey = label.Key("k8s.statefulset.uid") - - // The name of the StatefulSet. - K8SStatefulSetNameKey = label.Key("k8s.statefulset.name") - - // The uid of the DaemonSet. - K8SDaemonSetUIDKey = label.Key("k8s.daemonset.uid") - - // The name of the DaemonSet. - K8SDaemonSetNameKey = label.Key("k8s.daemonset.name") - - // The uid of the Job. - K8SJobUIDKey = label.Key("k8s.job.uid") - - // The name of the Job. - K8SJobNameKey = label.Key("k8s.job.name") - - // The uid of the CronJob. - K8SCronJobUIDKey = label.Key("k8s.cronjob.uid") - - // The name of the CronJob. - K8SCronJobNameKey = label.Key("k8s.cronjob.name") -) - -// Semantic conventions for host resource attribute keys. -const ( - // A uniquely identifying name for the host: 'hostname', FQDN, or user specified name - HostNameKey = label.Key("host.name") - - // Unique host ID. For cloud environments this will be the instance ID. - HostIDKey = label.Key("host.id") - - // Type of host. For cloud environments this will be the machine type. - HostTypeKey = label.Key("host.type") - - // Name of the OS or VM image the host is running. - HostImageNameKey = label.Key("host.image.name") - - // Identifier of the image the host is running. - HostImageIDKey = label.Key("host.image.id") - - // Version of the image the host is running. - HostImageVersionKey = label.Key("host.image.version") -) - -// Semantic conventions for cloud environment resource attribute keys. -const ( - // Name of the cloud provider. - CloudProviderKey = label.Key("cloud.provider") - - // The account ID from the cloud provider used for authorization. - CloudAccountIDKey = label.Key("cloud.account.id") - - // Geographical region where this resource is. - CloudRegionKey = label.Key("cloud.region") - - // Zone of the region where this resource is. - CloudZoneKey = label.Key("cloud.zone") -) - -// Semantic conventions for common cloud provider resource attributes. -var ( - CloudProviderAWS = CloudProviderKey.String("aws") - CloudProviderAzure = CloudProviderKey.String("azure") - CloudProviderGCP = CloudProviderKey.String("gcp") -) - -// Semantic conventions for deployment attributes. -const ( - // Name of the deployment environment (aka deployment tier); e.g. (staging, production). - DeploymentEnvironmentKey = label.Key("deployment.environment") -) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/trace.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/trace.go deleted file mode 100644 index 5f780943c..000000000 --- a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/trace.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv" - -import "go.opentelemetry.io/otel/label" - -// Semantic conventions for attribute keys used for network related -// operations. -const ( - // Transport protocol used. - NetTransportKey = label.Key("net.transport") - - // Remote address of the peer. - NetPeerIPKey = label.Key("net.peer.ip") - - // Remote port number. - NetPeerPortKey = label.Key("net.peer.port") - - // Remote hostname or similar. - NetPeerNameKey = label.Key("net.peer.name") - - // Local host IP. Useful in case of a multi-IP host. - NetHostIPKey = label.Key("net.host.ip") - - // Local host port. - NetHostPortKey = label.Key("net.host.port") - - // Local hostname or similar. - NetHostNameKey = label.Key("net.host.name") -) - -// Semantic conventions for common transport protocol attributes. -var ( - NetTransportTCP = NetTransportKey.String("IP.TCP") - NetTransportUDP = NetTransportKey.String("IP.UDP") - NetTransportIP = NetTransportKey.String("IP") - NetTransportUnix = NetTransportKey.String("Unix") - NetTransportPipe = NetTransportKey.String("pipe") - NetTransportInProc = NetTransportKey.String("inproc") - NetTransportOther = NetTransportKey.String("other") -) - -// General attribute keys for spans. -const ( - // Service name of the remote service. Should equal the actual - // `service.name` resource attribute of the remote service, if any. - PeerServiceKey = label.Key("peer.service") -) - -// Semantic conventions for attribute keys used to identify an authorized -// user. -const ( - // Username or the client identifier extracted from the access token or - // authorization header in the inbound request from outside the system. - EnduserIDKey = label.Key("enduser.id") - - // Actual or assumed role the client is making the request with. - EnduserRoleKey = label.Key("enduser.role") - - // Scopes or granted authorities the client currently possesses. - EnduserScopeKey = label.Key("enduser.scope") -) - -// Semantic conventions for attribute keys for HTTP. -const ( - // HTTP request method. - HTTPMethodKey = label.Key("http.method") - - // Full HTTP request URL in the form: - // scheme://host[:port]/path?query[#fragment]. - HTTPURLKey = label.Key("http.url") - - // The full request target as passed in a HTTP request line or - // equivalent, e.g. "/path/12314/?q=ddds#123". - HTTPTargetKey = label.Key("http.target") - - // The value of the HTTP host header. - HTTPHostKey = label.Key("http.host") - - // The URI scheme identifying the used protocol. - HTTPSchemeKey = label.Key("http.scheme") - - // HTTP response status code. - HTTPStatusCodeKey = label.Key("http.status_code") - - // Kind of HTTP protocol used. - HTTPFlavorKey = label.Key("http.flavor") - - // Value of the HTTP User-Agent header sent by the client. - HTTPUserAgentKey = label.Key("http.user_agent") - - // The primary server name of the matched virtual host. - HTTPServerNameKey = label.Key("http.server_name") - - // The matched route served (path template). For example, - // "/users/:userID?". 
- HTTPRouteKey = label.Key("http.route") - - // The IP address of the original client behind all proxies, if known - // (e.g. from X-Forwarded-For). - HTTPClientIPKey = label.Key("http.client_ip") - - // The size of the request payload body in bytes. - HTTPRequestContentLengthKey = label.Key("http.request_content_length") - - // The size of the uncompressed request payload body after transport decoding. - // Not set if transport encoding not used. - HTTPRequestContentLengthUncompressedKey = label.Key("http.request_content_length_uncompressed") - - // The size of the response payload body in bytes. - HTTPResponseContentLengthKey = label.Key("http.response_content_length") - - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - HTTPResponseContentLengthUncompressedKey = label.Key("http.response_content_length_uncompressed") -) - -// Semantic conventions for common HTTP attributes. -var ( - // Semantic conventions for HTTP(S) URI schemes. - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") - - // Semantic conventions for HTTP protocols. - HTTPFlavor1_0 = HTTPFlavorKey.String("1.0") - HTTPFlavor1_1 = HTTPFlavorKey.String("1.1") - HTTPFlavor2 = HTTPFlavorKey.String("2") - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic conventions for attribute keys for database connections. -const ( - // Identifier for the database system (DBMS) being used. - DBSystemKey = label.Key("db.system") - - // Database Connection String with embedded credentials removed. - DBConnectionStringKey = label.Key("db.connection_string") - - // Username for accessing database. - DBUserKey = label.Key("db.user") -) - -// Semantic conventions for common database system attributes. -var ( - DBSystemDB2 = DBSystemKey.String("db2") // IBM DB2 - DBSystemDerby = DBSystemKey.String("derby") // Apache Derby - DBSystemHive = DBSystemKey.String("hive") // Apache Hive - DBSystemMariaDB = DBSystemKey.String("mariadb") // MariaDB - DBSystemMSSql = DBSystemKey.String("mssql") // Microsoft SQL Server - DBSystemMySQL = DBSystemKey.String("mysql") // MySQL - DBSystemOracle = DBSystemKey.String("oracle") // Oracle Database - DBSystemPostgres = DBSystemKey.String("postgresql") // PostgreSQL - DBSystemSqlite = DBSystemKey.String("sqlite") // SQLite - DBSystemTeradata = DBSystemKey.String("teradata") // Teradata - DBSystemOtherSQL = DBSystemKey.String("other_sql") // Some other Sql database. Fallback only - DBSystemCassandra = DBSystemKey.String("cassandra") // Cassandra - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Microsoft Azure CosmosDB - DBSystemCouchbase = DBSystemKey.String("couchbase") // Couchbase - DBSystemCouchDB = DBSystemKey.String("couchdb") // CouchDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Amazon DynamoDB - DBSystemHBase = DBSystemKey.String("hbase") // HBase - DBSystemMongodb = DBSystemKey.String("mongodb") // MongoDB - DBSystemNeo4j = DBSystemKey.String("neo4j") // Neo4j - DBSystemRedis = DBSystemKey.String("redis") // Redis -) - -// Semantic conventions for attribute keys for database calls. -const ( - // Database instance name. - DBNameKey = label.Key("db.name") - - // A database statement for the given database type. - DBStatementKey = label.Key("db.statement") - - // A database operation for the given database type. 
- DBOperationKey = label.Key("db.operation") -) - -// Database technology-specific attributes -const ( - // Name of the Cassandra keyspace accessed. Use instead of `db.name`. - DBCassandraKeyspaceKey = label.Key("db.cassandra.keyspace") - - // HBase namespace accessed. Use instead of `db.name`. - DBHBaseNamespaceKey = label.Key("db.hbase.namespace") - - // Index of Redis database accessed. Use instead of `db.name`. - DBRedisDBIndexKey = label.Key("db.redis.database_index") - - // Collection being accessed within the database in `db.name`. - DBMongoDBCollectionKey = label.Key("db.mongodb.collection") -) - -// Semantic conventions for attribute keys for RPC. -const ( - // A string identifying the remoting system. - RPCSystemKey = label.Key("rpc.system") - - // The full name of the service being called. - RPCServiceKey = label.Key("rpc.service") - - // The name of the method being called. - RPCMethodKey = label.Key("rpc.method") - - // Name of message transmitted or received. - RPCNameKey = label.Key("name") - - // Type of message transmitted or received. - RPCMessageTypeKey = label.Key("message.type") - - // Identifier of message transmitted or received. - RPCMessageIDKey = label.Key("message.id") - - // The compressed size of the message transmitted or received in bytes. - RPCMessageCompressedSizeKey = label.Key("message.compressed_size") - - // The uncompressed size of the message transmitted or received in - // bytes. - RPCMessageUncompressedSizeKey = label.Key("message.uncompressed_size") -) - -// Semantic conventions for common RPC attributes. -var ( - // Semantic convention for gRPC as the remoting system. - RPCSystemGRPC = RPCSystemKey.String("grpc") - - // Semantic convention for a message named message. - RPCNameMessage = RPCNameKey.String("message") - - // Semantic conventions for RPC message types. - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -// Semantic conventions for attribute keys for messaging systems. -const ( - // A unique identifier describing the messaging system. For example, - // kafka, rabbitmq or activemq. - MessagingSystemKey = label.Key("messaging.system") - - // The message destination name, e.g. MyQueue or MyTopic. - MessagingDestinationKey = label.Key("messaging.destination") - - // The kind of message destination. - MessagingDestinationKindKey = label.Key("messaging.destination_kind") - - // Describes if the destination is temporary or not. - MessagingTempDestinationKey = label.Key("messaging.temp_destination") - - // The name of the transport protocol. - MessagingProtocolKey = label.Key("messaging.protocol") - - // The version of the transport protocol. - MessagingProtocolVersionKey = label.Key("messaging.protocol_version") - - // Messaging service URL. - MessagingURLKey = label.Key("messaging.url") - - // Identifier used by the messaging system for a message. - MessagingMessageIDKey = label.Key("messaging.message_id") - - // Identifier used by the messaging system for a conversation. - MessagingConversationIDKey = label.Key("messaging.conversation_id") - - // The (uncompressed) size of the message payload in bytes. - MessagingMessagePayloadSizeBytesKey = label.Key("messaging.message_payload_size_bytes") - - // The compressed size of the message payload in bytes. - MessagingMessagePayloadCompressedSizeBytesKey = label.Key("messaging.message_payload_compressed_size_bytes") - - // Identifies which part and kind of message consumption is being - // preformed. 
- MessagingOperationKey = label.Key("messaging.operation") - - // RabbitMQ specific attribute describing the destination routing key. - MessagingRabbitMQRoutingKeyKey = label.Key("messaging.rabbitmq.routing_key") -) - -// Semantic conventions for common messaging system attributes. -var ( - // Semantic conventions for message destinations. - MessagingDestinationKindKeyQueue = MessagingDestinationKindKey.String("queue") - MessagingDestinationKindKeyTopic = MessagingDestinationKindKey.String("topic") - - // Semantic convention for message destinations that are temporary. - MessagingTempDestination = MessagingTempDestinationKey.Bool(true) - - // Semantic convention for the operation parts of message consumption. - // This does not include a "send" attribute as that is explicitly not - // allowed in the OpenTelemetry specification. - MessagingOperationReceive = MessagingOperationKey.String("receive") - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Semantic conventions for attribute keys for FaaS systems. -const ( - - // Type of the trigger on which the function is executed. - FaaSTriggerKey = label.Key("faas.trigger") - - // String containing the execution identifier of the function. - FaaSExecutionKey = label.Key("faas.execution") - - // A boolean indicating that the serverless function is executed - // for the first time (aka cold start). - FaaSColdstartKey = label.Key("faas.coldstart") - - // The name of the source on which the operation was performed. - // For example, in Cloud Storage or S3 corresponds to the bucket name, - // and in Cosmos DB to the database name. - FaaSDocumentCollectionKey = label.Key("faas.document.collection") - - // The type of the operation that was performed on the data. - FaaSDocumentOperationKey = label.Key("faas.document.operation") - - // A string containing the time when the data was accessed. - FaaSDocumentTimeKey = label.Key("faas.document.time") - - // The document name/table subjected to the operation. - FaaSDocumentNameKey = label.Key("faas.document.name") - - // The function invocation time. - FaaSTimeKey = label.Key("faas.time") - - // The schedule period as Cron Expression. - FaaSCronKey = label.Key("faas.cron") -) - -// Semantic conventions for common FaaS system attributes. -var ( - // Semantic conventions for the types of triggers. - FaasTriggerDatasource = FaaSTriggerKey.String("datasource") - FaasTriggerHTTP = FaaSTriggerKey.String("http") - FaasTriggerPubSub = FaaSTriggerKey.String("pubsub") - FaasTriggerTimer = FaaSTriggerKey.String("timer") - FaasTriggerOther = FaaSTriggerKey.String("other") - - // Semantic conventions for the types of operations performed. - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go similarity index 64% rename from src/runtime/vendor/go.opentelemetry.io/otel/semconv/doc.go rename to src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go index 9a729fdec..c0b1723f8 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/doc.go @@ -14,11 +14,7 @@ // Package semconv implements OpenTelemetry semantic conventions. // -// This package is currently in a pre-GA phase. 
Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -// // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package aims to be the -// centralized place to interact with these conventions. -package semconv // import "go.opentelemetry.io/otel/semconv" +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.4.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/unit/unit.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go similarity index 79% rename from src/runtime/vendor/go.opentelemetry.io/otel/unit/unit.go rename to src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go index 523bfe1d0..ee13cfdbd 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/unit/unit.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/exception.go @@ -12,12 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package unit // import "go.opentelemetry.io/otel/unit" - -type Unit string +package semconv const ( - Dimensionless Unit = "1" - Bytes Unit = "By" - Milliseconds Unit = "ms" + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" ) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/http.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go similarity index 80% rename from src/runtime/vendor/go.opentelemetry.io/otel/semconv/http.go rename to src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go index 50b0f696e..569e3c645 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/http.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/http.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package semconv // import "go.opentelemetry.io/otel/semconv" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" import ( "fmt" @@ -21,16 +21,22 @@ import ( "strconv" "strings" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/label" +) + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []label.KeyValue { - attrs := []label.KeyValue{} +func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} switch network { case "tcp", "tcp4", "tcp6": @@ -121,9 +127,9 @@ func NetAttributesFromHTTPRequest(network string, request *http.Request) []label // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. 
-func EndUserAttributesFromHTTPRequest(request *http.Request) []label.KeyValue { +func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { if username, _, ok := request.BasicAuth(); ok { - return []label.KeyValue{EnduserIDKey.String(username)} + return []attribute.KeyValue{EnduserIDKey.String(username)} } return nil } @@ -131,8 +137,8 @@ func EndUserAttributesFromHTTPRequest(request *http.Request) []label.KeyValue { // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. -func HTTPClientAttributesFromHTTPRequest(request *http.Request) []label.KeyValue { - attrs := []label.KeyValue{} +func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} if request.Method != "" { attrs = append(attrs, HTTPMethodKey.String(request.Method)) @@ -140,13 +146,21 @@ func HTTPClientAttributesFromHTTPRequest(request *http.Request) []label.KeyValue attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) } + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + attrs = append(attrs, HTTPURLKey.String(request.URL.String())) + // restore any username/password info that was removed + request.URL.User = userinfo + return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) } -func httpCommonAttributesFromHTTPRequest(request *http.Request) []label.KeyValue { - attrs := []label.KeyValue{} +func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} if ua := request.UserAgent(); ua != "" { attrs = append(attrs, HTTPUserAgentKey.String(ua)) } @@ -157,9 +171,9 @@ func httpCommonAttributesFromHTTPRequest(request *http.Request) []label.KeyValue return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) } -func httpBasicAttributesFromHTTPRequest(request *http.Request) []label.KeyValue { +func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []label.KeyValue{} + attrs := []attribute.KeyValue{} if request.TLS != nil { attrs = append(attrs, HTTPSchemeHTTPS) @@ -186,8 +200,8 @@ func httpBasicAttributesFromHTTPRequest(request *http.Request) []label.KeyValue // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []label.KeyValue { - attrs := []label.KeyValue{} +func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} if serverName != "" { attrs = append(attrs, HTTPServerNameKey.String(serverName)) } @@ -198,8 +212,8 @@ func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http. // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. 
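Reviewer note: the http.go hunk above renames the attribute types (label.KeyValue becomes attribute.KeyValue), moves the package to a versioned import path, and makes HTTPClientAttributesFromHTTPRequest strip userinfo from the recorded URL. A minimal sketch (not part of this patch) of what an instrumented net/http handler looks like against the new API; the tracer name and route are illustrative:

package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"go.opentelemetry.io/otel/trace"
)

// handle starts a server span and attaches the semconv HTTP attributes to it.
func handle(w http.ResponseWriter, r *http.Request) {
	_, span := otel.Tracer("example").Start(r.Context(), "handle",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest("", "/example", r)...))
	defer span.End()

	w.WriteHeader(http.StatusOK)
}

Note also that SpanStatusFromHTTPStatusCode, refactored further down in this file, now returns an empty description for valid status codes instead of "HTTP status code: %d".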
-func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []label.KeyValue { - attrs := []label.KeyValue{ +func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ HTTPMethodKey.String(request.Method), HTTPTargetKey.String(request.RequestURI), } @@ -220,8 +234,8 @@ func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. -func HTTPAttributesFromHTTPStatusCode(code int) []label.KeyValue { - attrs := []label.KeyValue{ +func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ HTTPStatusCodeKey.Int(code), } return attrs @@ -264,29 +278,34 @@ var validRangesPerCategory = map[int][]codeRange{ // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := func() (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true - }() + spanCode, valid := validateHTTPStatusCode(code) if !valid { return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) } - return spanCode, fmt.Sprintf("HTTP status code: %d", code) + return spanCode, "" +} + +// Validates the HTTP status code and returns corresponding span status code. +// If the `code` is not a valid HTTP status code, returns span status Error +// and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go new file mode 100644 index 000000000..404bd4e75 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/resource.go @@ -0,0 +1,906 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +import "go.opentelemetry.io/otel/attribute" + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. 
+ // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'gcp' + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. Refer to your provider's docs + // to see the available regions, for example [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure regions](https://azure.microsoft.com/en-us/global- + // infrastructure/geographies/), or [Google Cloud + // regions](https://cloud.google.com/about/locations). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Google Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'aws_ec2', 'azure_vm', 'gcp_compute_engine' + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") +) + +var ( + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") +) + +// Resources used by AWS Elastic Container Service (ECS). 
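Reviewer note, before the ECS-specific keys that follow: the generated cloud.* keys above are resource attributes rather than span attributes. A hedged sketch of how they are typically consumed when constructing an SDK resource; the service name and region are examples only, and SchemaURL is defined in schema.go later in this diff:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

// newResource builds a resource tagged with the v1.4.0 schema URL and a few
// of the generated keys; the values are illustrative.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("example-service"),
		semconv.CloudProviderAWS,
		semconv.CloudRegionKey.String("us-east-1"),
	)
}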
+const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'ec2', 'fargate' + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. 
+ DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// A serverless instance. +const ( + // The name of the function being executed. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the function being executed. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: For example, in AWS Lambda this field corresponds to the + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html) value, in GCP to the URI of the resource, and in Azure to the + // [FunctionDirectory](https://github.com/Azure/azure-functions- + // host/wiki/Retrieving-information-about-the-currently-running-function) field. + FaaSIDKey = attribute.Key("faas.id") + // The version string of the function being executed as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-function:instance-0001' + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. 
+ // + // Type: Enum + // Required: No + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container in a Pod template. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicasetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicasetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulsetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulsetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonsetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonsetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
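Reviewer note (the OS attributes continue below): the k8s.* keys above likewise describe the resource. In practice they are populated by a resource detector or the Kubernetes downward API rather than hard-coded; a small sketch with illustrative values:

package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

// k8sAttributes expresses a pod's identity with the generated keys; callers
// would normally obtain these values from the environment, not literals.
func k8sAttributes(namespace, pod, container string) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.K8SNamespaceNameKey.String(namespace),
		semconv.K8SPodNameKey.String(pod),
		semconv.K8SContainerNameKey.String(container),
	}
}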
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. 
For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/label/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go similarity index 64% rename from src/runtime/vendor/go.opentelemetry.io/otel/label/doc.go rename to src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go index abdffdb46..41079980e 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/label/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/schema.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package label provides key and value labels. -// -// This package is currently in a pre-GA phase. 
Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -package label // import "go.opentelemetry.io/otel/label" +package semconv + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/v1.4.0" diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go new file mode 100644 index 000000000..805eadc9f --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/semconv/v1.4.0/trace.go @@ -0,0 +1,1378 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" + +import "go.opentelemetry.io/otel/attribute" + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // If no [tech-specific attribute](#call-level-attributes-for-specific- + // technologies) is defined, this attribute is used to report the name of the + // database being accessed. For commands that switch the database, this should be + // set to the target database (even if the command fails). + // + // Type: string + // Required: Required, if applicable and no more-specific attribute is defined. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". + DBNameKey = attribute.Key("db.name") + // The database statement being executed. 
+ // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = 
DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The name of the keyspace being accessed. To be used instead of the generic + // `db.name` attribute. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'mykeyspace' + DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // Required: No + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. 
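Reviewer note, before the remaining Cassandra-specific keys below: the call-level db.* keys above attach to individual client spans, not to the resource. A hedged sketch of a database call span; the query text, database name, and tracer name are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"go.opentelemetry.io/otel/trace"
)

// queryUsers records a client span carrying the database call attributes.
func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,
			semconv.DBNameKey.String("customers"),
			semconv.DBStatementKey.String("SELECT * FROM users WHERE id = $1"),
			semconv.DBOperationKey.String("SELECT"),
		))
	defer span.End()
	// ... execute the query here ...
}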
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Apache HBase +const ( + // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being + // accessed. To be used instead of the generic `db.name` attribute. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'default' + DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attrbiutes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). 
The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in flight". + // This may be actually "in flight" in some languages (e.g. if the exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most languages. + + // It is usually not possible to determine at the point where an exception is + // thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the [example above](#exception-end-example). + + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger on which the function is executed. + // + // Type: Enum + // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. + // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing + // invocations, if it is known to the client. This is, for example, not the case, + // when the transport layer is abstracted in a FaaS client framework without + // access to its configuration. + // Stability: stable + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Required: No + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Examples: 'aws' + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'ip_tcp' + NetTransportKey = attribute.Key("net.transport") + // Remote address of the peer (dotted decimal for IPv4 or + // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + NetPeerIPKey = attribute.Key("net.peer.ip") + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + NetPeerNameKey = attribute.Key("net.peer.name") + // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + NetHostIPKey = attribute.Key("net.host.ip") + // Like `net.peer.port` but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + NetHostPortKey = attribute.Key("net.host.port") + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Another IP-based protocol + NetTransportIP = NetTransportKey.String("ip") + // Unix Domain socket. 
See below + NetTransportUnix = NetTransportKey.String("unix") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + HTTPTargetKey = attribute.Key("http.target") + // The value of the [HTTP host + // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is + // empty or not present, this attribute should be the same. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + HTTPHostKey = attribute.Key("http.host") + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // Required: If and only if one was received/sent. + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: '1.0' + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User- + // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the + // client. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") +) + +var ( + // HTTP 1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP 1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP 2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( `net.host.name` should be used instead). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `http.url` is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus + // preferred to supply the raw data that is available. + HTTPServerNameKey = attribute.Key("http.server_name") + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.peer.ip`, which would identify + // the network-level peer, which may be a proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request + // field + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// DynamoDB.ListTables +const ( + // The value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + // The number of items in the `TableNames` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// DynamoDB.Query +const ( + // The value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// DynamoDB.Scan +const ( + // The value of the `Segment` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + // The value of the `TotalSegments` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + // The value of the `Count` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + // The value of the `ScannedCount` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` + // request field.
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + MessagingDestinationKey = attribute.Key("messaging.destination") + // The kind of message destination + // + // Type: Enum + // Required: Required only if the message destination is either a `queue` or + // `topic`. + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") + // The name of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + MessagingProtocolKey = attribute.Key("messaging.protocol") + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + MessagingURLKey = attribute.Key("messaging.url") + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message_id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + MessagingConversationIDKey = attribute.Key("messaging.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") + // The compressed size of the message payload in bytes. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // [Operation names](#operation-names) section above. If the operation is "send", + // this attribute MUST NOT be set, since the operation can be inferred from the + // span kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingOperationKey = attribute.Key("messaging.operation") +) + +var ( + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message_id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'grpc', 'java_rmi', 'wcf' + RPCSystemKey = attribute.Key("rpc.system") + // The full name of the service being called, including its package name, if + // applicable.
+ // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + RPCServiceKey = attribute.Key("rpc.service") + // The name of the method being called, must be equal to the $method part in the + // span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + RPCMethodKey = attribute.Key("rpc.method") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. + // + // Type: Enum + // Required: Always + // Stability: stable + // Examples: 0, 1, 16 + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `method` property from request. Unlike `rpc.method`, this may not relate to the + // actual method being called. Useful for client-side traces since client does not + // know what will be called on the server. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'users.create', 'get_users' + RPCJsonrpcMethodKey = attribute.Key("rpc.jsonrpc.method") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. 
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace.go index 281592e94..28b4f5e4d 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/trace.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace.go @@ -22,18 +22,18 @@ import ( // Tracer creates a named tracer that implements Tracer interface. // If the name is an empty string then provider uses default name. // -// This is short for TracerProvider().Tracer(name) -func Tracer(name string) trace.Tracer { - return GetTracerProvider().Tracer(name) +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) } -// TracerProvider returns the registered global trace provider. +// GetTracerProvider returns the registered global trace provider. // If none is registered then an instance of NoopTracerProvider is returned. // // Use the trace provider to create a named tracer. E.g. -// tracer := global.GetTracerProvider().Tracer("example.com/foo") +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") // or -// tracer := global.Tracer("example.com/foo") +// tracer := otel.Tracer("example.com/foo") func GetTracerProvider() trace.TracerProvider { return global.TracerProvider() } diff --git a/src/runtime/vendor/github.com/apache/thrift/LICENSE b/src/runtime/vendor/go.opentelemetry.io/otel/trace/LICENSE similarity index 90% rename from src/runtime/vendor/github.com/apache/thrift/LICENSE rename to src/runtime/vendor/go.opentelemetry.io/otel/trace/LICENSE index 3b6d7d74c..261eeb9e9 100644 --- a/src/runtime/vendor/github.com/apache/thrift/LICENSE +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,40 +199,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. 
USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) --------------------------------------------------- diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/config.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/config.go index 9a343633d..b1b74cb72 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/config.go @@ -12,106 +12,207 @@ // See the License for the specific language governing permissions and // limitations under the License. -package trace +package trace // import "go.opentelemetry.io/otel/trace" import ( "time" - "go.opentelemetry.io/otel/label" + "go.opentelemetry.io/otel/attribute" ) // TracerConfig is a group of options for a Tracer. type TracerConfig struct { - // InstrumentationVersion is the version of the library providing - // instrumentation. - InstrumentationVersion string + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL } // NewTracerConfig applies all the options to a returned TracerConfig. -func NewTracerConfig(options ...TracerOption) *TracerConfig { - config := new(TracerConfig) +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig for _, option := range options { - option.ApplyTracer(config) + option.apply(&config) } return config } // TracerOption applies an option to a TracerConfig. type TracerOption interface { - ApplyTracer(*TracerConfig) + apply(*TracerConfig) +} + +type tracerOptionFunc func(*TracerConfig) + +func (fn tracerOptionFunc) apply(cfg *TracerConfig) { + fn(cfg) } // SpanConfig is a group of options for a Span. type SpanConfig struct { - // Attributes describe the associated qualities of a Span. - Attributes []label.KeyValue - // Timestamp is a time in a Span life-cycle. - Timestamp time.Time - // Links are the associations a Span has with other Spans. - Links []Link - // Record is the recording state of a Span. - Record bool - // NewRoot identifies a Span as the root Span for a new trace. This is - // commonly used when an existing trace crosses trust boundaries and the - // remote parent span context should be ignored for security. - NewRoot bool - // SpanKind is the role a Span has in a trace. - SpanKind SpanKind -} - -// NewSpanConfig applies all the options to a returned SpanConfig. + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. 
This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. // No validation is performed on the returned SpanConfig (e.g. no uniqueness // checking or bounding of data), it is left to the SDK to perform this // action. -func NewSpanConfig(options ...SpanOption) *SpanConfig { - c := new(SpanConfig) +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig for _, option := range options { - option.ApplySpan(c) + option.applySpanStart(&c) } return c } -// SpanOption applies an option to a SpanConfig. -type SpanOption interface { - ApplySpan(*SpanConfig) +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + option.applySpanEnd(&c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created +type SpanStartOption interface { + applySpanStart(*SpanConfig) +} + +type spanOptionFunc func(*SpanConfig) + +func (fn spanOptionFunc) applySpanStart(cfg *SpanConfig) { + fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(*SpanConfig) +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace } -// NewEventConfig applies all the EventOptions to a returned SpanConfig. If no -// timestamp option is passed, the returned SpanConfig will have a Timestamp +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp // set to the call time, otherwise no validation is performed on the returned -// SpanConfig. -func NewEventConfig(options ...EventOption) *SpanConfig { - c := new(SpanConfig) +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig for _, option := range options { - option.ApplyEvent(c) + option.applyEvent(&c) } - if c.Timestamp.IsZero() { - c.Timestamp = time.Now() + if c.timestamp.IsZero() { + c.timestamp = time.Now() } return c } -// EventOption applies span event options to a SpanConfig. +// EventOption applies span event options to an EventConfig. type EventOption interface { - ApplyEvent(*SpanConfig) + applyEvent(*EventConfig) } -// LifeCycleOption applies span life-cycle options to a SpanConfig. 
These -// options set values releated to events in a spans life-cycle like starting, -// ending, experiencing an error and other user defined notable events. -type LifeCycleOption interface { - SpanOption +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption EventOption } -type attributeSpanOption []label.KeyValue +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} -func (o attributeSpanOption) ApplySpan(c *SpanConfig) { o.apply(c) } -func (o attributeSpanOption) ApplyEvent(c *SpanConfig) { o.apply(c) } -func (o attributeSpanOption) apply(c *SpanConfig) { - c.Attributes = append(c.Attributes, []label.KeyValue(o)...) +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c *SpanConfig) { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) +} +func (o attributeOption) applySpanStart(c *SpanConfig) { o.applySpan(c) } +func (o attributeOption) applyEvent(c *EventConfig) { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) } +var _ SpanStartEventOption = attributeOption{} + // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this // option is provided to a Span's start or end events. Otherwise, these @@ -121,76 +222,76 @@ func (o attributeSpanOption) apply(c *SpanConfig) { // If multiple of these options are passed the attributes of each successive // option will extend the attributes instead of overwriting. There is no // guarantee of uniqueness in the resulting attributes. -func WithAttributes(attributes ...label.KeyValue) LifeCycleOption { - return attributeSpanOption(attributes) +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) } -type timestampSpanOption time.Time - -func (o timestampSpanOption) ApplySpan(c *SpanConfig) { o.apply(c) } -func (o timestampSpanOption) ApplyEvent(c *SpanConfig) { o.apply(c) } -func (o timestampSpanOption) apply(c *SpanConfig) { c.Timestamp = time.Time(o) } - -// WithTimestamp sets the time of a Span life-cycle moment (e.g. started, -// stopped, errored). -func WithTimestamp(t time.Time) LifeCycleOption { - return timestampSpanOption(t) +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption } -type linksSpanOption []Link +type timestampOption time.Time -func (o linksSpanOption) ApplySpan(c *SpanConfig) { c.Links = append(c.Links, []Link(o)...) } +func (o timestampOption) applySpan(c *SpanConfig) { c.timestamp = time.Time(o) } +func (o timestampOption) applySpanStart(c *SpanConfig) { o.applySpan(c) } +func (o timestampOption) applySpanEnd(c *SpanConfig) { o.applySpan(c) } +func (o timestampOption) applyEvent(c *EventConfig) { c.timestamp = time.Time(o) } -// WithLinks adds links to a Span. The links are added to the existing Span -// links, i.e. this does not overwrite. -func WithLinks(links ...Link) SpanOption { - return linksSpanOption(links) +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). 
+func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) } -type recordSpanOption bool +type stackTraceOption bool -func (o recordSpanOption) ApplySpan(c *SpanConfig) { c.Record = bool(o) } +func (o stackTraceOption) applyEvent(c *EventConfig) { c.stackTrace = bool(o) } +func (o stackTraceOption) applySpan(c *SpanConfig) { c.stackTrace = bool(o) } +func (o stackTraceOption) applySpanEnd(c *SpanConfig) { o.applySpan(c) } -// WithRecord specifies that the span should be recorded. It is important to -// note that implementations may override this option, i.e. if the span is a -// child of an un-sampled trace. -func WithRecord() SpanOption { - return recordSpanOption(true) +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) } -type newRootSpanOption bool - -func (o newRootSpanOption) ApplySpan(c *SpanConfig) { c.NewRoot = bool(o) } +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg *SpanConfig) { + cfg.links = append(cfg.links, links...) + }) +} // WithNewRoot specifies that the Span should be treated as a root Span. Any // existing parent span context will be ignored when defining the Span's trace // identifiers. -func WithNewRoot() SpanOption { - return newRootSpanOption(true) +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg *SpanConfig) { + cfg.newRoot = true + }) } -type spanKindSpanOption SpanKind - -func (o spanKindSpanOption) ApplySpan(c *SpanConfig) { c.SpanKind = SpanKind(o) } - // WithSpanKind sets the SpanKind of a Span. -func WithSpanKind(kind SpanKind) SpanOption { - return spanKindSpanOption(kind) -} - -// InstrumentationOption is an interface for applying instrumentation specific -// options. -type InstrumentationOption interface { - TracerOption +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg *SpanConfig) { + cfg.spanKind = kind + }) } // WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) InstrumentationOption { - return instrumentationVersionOption(version) +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg *TracerConfig) { + cfg.instrumentationVersion = version + }) } -type instrumentationVersionOption string - -func (i instrumentationVersionOption) ApplyTracer(config *TracerConfig) { - config.InstrumentationVersion = string(i) +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg *TracerConfig) { + cfg.schemaURL = schemaURL + }) } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/context.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 000000000..76f9a083c --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/doc.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/doc.go index c962f3bc6..391417718 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -16,10 +16,6 @@ Package trace provides an implementation of the tracing part of the OpenTelemetry API. -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - To participate in distributed traces a Span needs to be created for the operation being performed as part of a traced workflow.
It its simplest form: diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/go.mod b/src/runtime/vendor/go.opentelemetry.io/otel/trace/go.mod new file mode 100644 index 000000000..fac348c9f --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/go.mod @@ -0,0 +1,71 @@ +module go.opentelemetry.io/otel/trace + +go 1.15 + +replace go.opentelemetry.io/otel => ../ + +replace go.opentelemetry.io/otel/bridge/opencensus => ../bridge/opencensus + +replace go.opentelemetry.io/otel/bridge/opentracing => ../bridge/opentracing + +replace go.opentelemetry.io/otel/example/jaeger => ../example/jaeger + +replace go.opentelemetry.io/otel/example/namedtracer => ../example/namedtracer + +replace go.opentelemetry.io/otel/example/opencensus => ../example/opencensus + +replace go.opentelemetry.io/otel/example/otel-collector => ../example/otel-collector + +replace go.opentelemetry.io/otel/example/prom-collector => ../example/prom-collector + +replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus + +replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin + +replace go.opentelemetry.io/otel/exporters/prometheus => ../exporters/prometheus + +replace go.opentelemetry.io/otel/exporters/jaeger => ../exporters/jaeger + +replace go.opentelemetry.io/otel/exporters/zipkin => ../exporters/zipkin + +replace go.opentelemetry.io/otel/internal/tools => ../internal/tools + +replace go.opentelemetry.io/otel/metric => ../metric + +replace go.opentelemetry.io/otel/sdk => ../sdk + +replace go.opentelemetry.io/otel/sdk/export/metric => ../sdk/export/metric + +replace go.opentelemetry.io/otel/sdk/metric => ../sdk/metric + +replace go.opentelemetry.io/otel/trace => ./ + +require ( + github.com/google/go-cmp v0.5.6 + github.com/stretchr/testify v1.7.0 + go.opentelemetry.io/otel v1.0.0 +) + +replace go.opentelemetry.io/otel/example/passthrough => ../example/passthrough + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../exporters/otlp/otlptrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../exporters/otlp/otlptrace/otlptracegrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp + +replace go.opentelemetry.io/otel/internal/metric => ../internal/metric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc + +replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../exporters/stdout/stdoutmetric + +replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../exporters/stdout/stdouttrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp => ../exporters/otlp/otlpmetric/otlpmetrichttp + +replace go.opentelemetry.io/otel/bridge/opencensus/test => ../bridge/opencensus/test + +replace go.opentelemetry.io/otel/example/fib => ../example/fib diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/go.sum b/src/runtime/vendor/go.opentelemetry.io/otel/trace/go.sum new file mode 100644 index 000000000..f212493d5 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/go.sum @@ -0,0 +1,15 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= 
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/trace_go11.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/nonrecording.go similarity index 60% rename from src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/trace_go11.go rename to src/runtime/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 51586a538..88fcb8161 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/sdk/trace/trace_go11.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -12,21 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.11 +package trace // import "go.opentelemetry.io/otel/trace" -package trace // import "go.opentelemetry.io/otel/sdk/trace" +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan -import ( - "context" - rt "runtime/trace" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - if !rt.IsEnabled() { - // Avoid additional overhead if - // runtime/trace is not enabled. - return ctx, func() {} - } - nctx, task := rt.NewTask(ctx, name) - return nctx, task.End + sc SpanContext } + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/trace_noop.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/noop.go similarity index 78% rename from src/runtime/vendor/go.opentelemetry.io/otel/trace/trace_noop.go rename to src/runtime/vendor/go.opentelemetry.io/otel/trace/noop.go index 6f18a9481..ad9a9fc5b 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/trace/trace_noop.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -17,8 +17,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "context" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/label" ) // NewNoopTracerProvider returns an implementation of TracerProvider that @@ -42,16 +42,21 @@ type noopTracer struct{} var _ Tracer = noopTracer{} -// Start starts a noop span. 
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanOption) (context.Context, Span) { - span := noopSpan{} +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } return ContextWithSpan(ctx, span), span } // noopSpan is an implementation of Span that preforms no operations. type noopSpan struct{} -var _ noopSpan = noopSpan{} +var _ Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -66,19 +71,19 @@ func (noopSpan) SetStatus(codes.Code, string) {} func (noopSpan) SetError(bool) {} // SetAttributes does nothing. -func (noopSpan) SetAttributes(...label.KeyValue) {} +func (noopSpan) SetAttributes(...attribute.KeyValue) {} // End does nothing. -func (noopSpan) End(...SpanOption) {} +func (noopSpan) End(...SpanEndOption) {} // RecordError does nothing. func (noopSpan) RecordError(error, ...EventOption) {} -// Tracer returns the Tracer that created this Span. -func (noopSpan) Tracer() Tracer { return noopTracer{} } - // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} // SetName does nothing. func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider +func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/trace.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/trace.go index 76016e5bb..0923ceb98 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -20,20 +20,14 @@ import ( "encoding/hex" "encoding/json" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/label" ) const ( // FlagsSampled is a bitmask with the sampled bit set. A SpanContext // with the sampling bit set means the span is sampled. - FlagsSampled = byte(0x01) - // FlagsDeferred is a bitmask with the deferred bit set. A SpanContext - // with the deferred bit set means the sampling decision has been - // defered to the receiver. - FlagsDeferred = byte(0x02) - // FlagsDebug is a bitmask with the debug bit set. - FlagsDebug = byte(0x04) + FlagsSampled = TraceFlags(0x01) errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" @@ -51,7 +45,7 @@ func (e errorConst) Error() string { } // TraceID is a unique identity of a trace. -// nolint:golint +// nolint:revive // revive complains about stutter of `trace.TraceID`. type TraceID [16]byte var nilTraceID TraceID @@ -100,7 +94,7 @@ func (s SpanID) String() string { // TraceIDFromHex returns a TraceID from a hex string if it is compliant with // the W3C trace-context specification. See more at // https://www.w3.org/TR/trace-context/#trace-id -// nolint:golint +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { t := TraceID{} if len(h) != 32 { @@ -157,100 +151,200 @@ func decodeHex(h string, b []byte) error { return nil } -// SpanContext contains identifying trace information about a Span. 
-type SpanContext struct { +// TraceFlags contains flags that can be set on a SpanContext +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { TraceID TraceID SpanID SpanID - TraceFlags byte + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } } +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + // IsValid returns if the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + // HasTraceID checks if the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { - return sc.TraceID.IsValid() + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID } // HasSpanID checks if the SpanContext has a valid SpanID. func (sc SpanContext) HasSpanID() bool { - return sc.SpanID.IsValid() + return sc.spanID.IsValid() } -// IsDeferred returns if the deferred bit is set in the trace flags. -func (sc SpanContext) IsDeferred() bool { - return sc.TraceFlags&FlagsDeferred == FlagsDeferred +// WithSpanID returns a new SpanContext with the SpanID replaced. 
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } } -// IsDebug returns if the debug bit is set in the trace flags. -func (sc SpanContext) IsDebug() bool { - return sc.TraceFlags&FlagsDebug == FlagsDebug +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the trace flags. +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. func (sc SpanContext) IsSampled() bool { - return sc.TraceFlags&FlagsSampled == FlagsSampled + return sc.traceFlags.IsSampled() } -type traceContextKeyType int - -const ( - currentSpanKey traceContextKeyType = iota - remoteContextKey -) - -// ContextWithSpan returns a copy of parent with span set to current. -func ContextWithSpan(parent context.Context, span Span) context.Context { - return context.WithValue(parent, currentSpanKey, span) +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } } -// SpanFromContext returns the current span from ctx, or noop span if none set. -func SpanFromContext(ctx context.Context) Span { - if span, ok := ctx.Value(currentSpanKey).(Span); ok { - return span - } - return noopSpan{} +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState } -// SpanContextFromContext returns the current SpanContext from ctx, or an empty SpanContext if none set. -func SpanContextFromContext(ctx context.Context) SpanContext { - if span := SpanFromContext(ctx); span != nil { - return span.SpanContext() +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, } - return SpanContext{} } -// ContextWithRemoteSpanContext returns a copy of parent with a remote set as -// the remote span context. -func ContextWithRemoteSpanContext(parent context.Context, remote SpanContext) context.Context { - return context.WithValue(parent, remoteContextKey, remote) +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote } -// RemoteSpanContextFromContext returns the remote span context from ctx. -func RemoteSpanContextFromContext(ctx context.Context) SpanContext { - if sc, ok := ctx.Value(remoteContextKey).(SpanContext); ok { - return sc - } - return SpanContext{} +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) } // Span is the individual component of a trace. 
It represents a single named // and timed operation of a workflow that is traced. A Tracer is used to // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. +// +// Warning: methods may be added to this interface in minor releases. type Span interface { - // Tracer returns the Tracer that created the Span. Tracer MUST NOT be - // nil. - Tracer() Tracer - // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this // method has been called. - End(options ...SpanOption) + End(options ...SpanEndOption) // AddEvent adds an event with the provided name and options. AddEvent(name string, options ...EventOption) @@ -259,17 +353,20 @@ type Span interface { // true if the Span is active and events can be recorded. IsRecording() bool - // RecordError records an error as a Span event. + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. RecordError(err error, options ...EventOption) - // SpanContext returns the SpanContext of the Span. The returned - // SpanContext is usable even after the End has been called for the Span. + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. SpanContext() SpanContext // SetStatus sets the status of the Span in the form of a code and a - // message. SetStatus overrides the value of previous calls to SetStatus - // on the Span. - SetStatus(code codes.Code, msg string) + // description, overriding previous values set. The description is only + // included in a status when the code is for an error. + SetStatus(code codes.Code, description string) // SetName sets the Span name. SetName(name string) @@ -277,7 +374,11 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. - SetAttributes(kv ...label.KeyValue) + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider } // Link is the relationship between two Spans. The relationship can be within @@ -296,8 +397,19 @@ type Span interface { // form. A Link is used to keep reference to the original SpanContext and // track the relationship. type Link struct { - SpanContext - Attributes []label.KeyValue + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } } // SpanKind is the role a Span plays in a Trace. @@ -372,12 +484,28 @@ func (sk SpanKind) String() string { } // Tracer is the creator of Spans. +// +// Warning: methods may be added to this interface in minor releases. 
type Tracer interface { - // Start creates a span. - Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span) + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) } // TracerProvider provides access to instrumentation Tracers. +// +// Warning: methods may be added to this interface in minor releases. type TracerProvider interface { // Tracer creates an implementation of the Tracer interface. // The instrumentationName must be the name of the library providing @@ -385,5 +513,7 @@ type TracerProvider interface { // only if that code provides built-in instrumentation. If the // instrumentationName is empty, then a implementation defined default // name will be used instead. + // + // This method must be concurrency safe. Tracer(instrumentationName string, opts ...TracerOption) Tracer } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/src/runtime/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 000000000..f457466a2 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,217 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" +) + +var ( + maxListMembers = 32 + + listDelimiter = "," + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +type member struct { + Key string + Value string +} + +func newMember(key, value string) (member, error) { + if !keyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +} + +func parseMemeber(m string) (member, error) { + matches := memberRe.FindStringSubmatch(m) + if len(matches) != 5 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + + return member{ + Key: matches[1], + Value: matches[4], + }, nil + +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return fmt.Sprintf("%s=%s", m.Key, m.Value) +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. 
+func ParseTraceState(tracestate string) (TraceState, error) { + if tracestate == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for _, memberStr := range strings.Split(tracestate, listDelimiter) { + if len(memberStr) == 0 { + continue + } + + m, err := parseMemeber(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. +func (ts TraceState) String() string { + members := make([]string, len(ts.list)) + for i, m := range ts.list { + members[i] = m.String() + } + return strings.Join(members, listDelimiter) +} + +// Get returns the value paired with key from the corresponding TraceState +// list-member if it exists, otherwise an empty string is returned. +func (ts TraceState) Get(key string) string { + for _, member := range ts.list { + if member.Key == key { + return member.Value + } + } + + return "" +} + +// Insert adds a new list-member defined by the key/value pair to the +// TraceState. If a list-member already exists for the given key, that +// list-member's value is updated. The new or updated list-member is always +// moved to the beginning of the TraceState as specified by the W3C Trace +// Context specification. +// +// If key or value are invalid according to the W3C Trace Context +// specification an error is returned with the original TraceState. +// +// If adding a new list-member means the TraceState would have more members +// than is allowed an error is returned instead with the original TraceState. +func (ts TraceState) Insert(key, value string) (TraceState, error) { + m, err := newMember(key, value) + if err != nil { + return ts, err + } + + cTS := ts.Delete(key) + if cTS.Len()+1 > maxListMembers { + // TODO (MrAlias): When the second version of the Trace Context + // specification is published this needs to not return an error. + // Instead it should drop the "right-most" member and insert the new + // member at the front. + // + // https://github.com/w3c/trace-context/pull/448 + return ts, fmt.Errorf("failed to insert: %w", errMemberNumber) + } + + cTS.list = append(cTS.list, member{}) + copy(cTS.list[1:], cTS.list) + cTS.list[0] = m + + return cTS, nil +} + +// Delete returns a copy of the TraceState with the list-member identified by +// key removed. +func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState. 
+func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/version.go b/src/runtime/vendor/go.opentelemetry.io/otel/version.go index fca98b749..e99407cac 100644 --- a/src/runtime/vendor/go.opentelemetry.io/otel/version.go +++ b/src/runtime/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "0.15.0" + return "1.0.0" } diff --git a/src/runtime/vendor/go.opentelemetry.io/otel/versions.yaml b/src/runtime/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 000000000..8ebf02503 --- /dev/null +++ b/src/runtime/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,55 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module-sets: + stable-v1: + version: v1.0.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/example/fib + - go.opentelemetry.io/otel/example/jaeger + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/jaeger + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/trace + - go.opentelemetry.io/otel/sdk + experimental-metrics: + version: v0.23.0 + modules: + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/internal/metric + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk/export/metric + - go.opentelemetry.io/otel/sdk/metric + bridge: + version: v0.23.0 + modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/opencensus +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/src/runtime/vendor/golang.org/x/oauth2/README.md b/src/runtime/vendor/golang.org/x/oauth2/README.md index 8cfd6063e..1473e1296 100644 --- a/src/runtime/vendor/golang.org/x/oauth2/README.md +++ b/src/runtime/vendor/golang.org/x/oauth2/README.md @@ -1,7 +1,7 @@ # OAuth2 for Go +[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) [![Build 
Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. @@ -14,17 +14,17 @@ go get golang.org/x/oauth2 Or you can manually git clone the repository to `$(go env GOPATH)/src/golang.org/x/oauth2`. -See godoc for further documentation and examples. +See pkg.go.dev for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) +* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) +* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) ## Policy for new packages We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a single endpoint, add it to the -[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) package. ## Report Issues / Send Patches diff --git a/src/runtime/vendor/golang.org/x/oauth2/internal/client_appengine.go b/src/runtime/vendor/golang.org/x/oauth2/internal/client_appengine.go index 743487188..e1755d1d9 100644 --- a/src/runtime/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/src/runtime/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/src/runtime/vendor/golang.org/x/sync/AUTHORS b/src/runtime/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/src/runtime/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/src/runtime/vendor/golang.org/x/sync/CONTRIBUTORS b/src/runtime/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/src/runtime/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/src/runtime/vendor/golang.org/x/sync/LICENSE b/src/runtime/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/src/runtime/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/runtime/vendor/golang.org/x/sync/PATENTS b/src/runtime/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/src/runtime/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/src/runtime/vendor/golang.org/x/sync/semaphore/semaphore.go b/src/runtime/vendor/golang.org/x/sync/semaphore/semaphore.go deleted file mode 100644 index 30f632c57..000000000 --- a/src/runtime/vendor/golang.org/x/sync/semaphore/semaphore.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semaphore provides a weighted semaphore implementation. -package semaphore // import "golang.org/x/sync/semaphore" - -import ( - "container/list" - "context" - "sync" -) - -type waiter struct { - n int64 - ready chan<- struct{} // Closed when semaphore acquired. -} - -// NewWeighted creates a new weighted semaphore with the given -// maximum combined weight for concurrent access. -func NewWeighted(n int64) *Weighted { - w := &Weighted{size: n} - return w -} - -// Weighted provides a way to bound concurrent access to a resource. -// The callers can request access with a given weight. 
-type Weighted struct { - size int64 - cur int64 - mu sync.Mutex - waiters list.List -} - -// Acquire acquires the semaphore with a weight of n, blocking until resources -// are available or ctx is done. On success, returns nil. On failure, returns -// ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. -func (s *Weighted) Acquire(ctx context.Context, n int64) error { - s.mu.Lock() - if s.size-s.cur >= n && s.waiters.Len() == 0 { - s.cur += n - s.mu.Unlock() - return nil - } - - if n > s.size { - // Don't make other Acquire calls block on one that's doomed to fail. - s.mu.Unlock() - <-ctx.Done() - return ctx.Err() - } - - ready := make(chan struct{}) - w := waiter{n: n, ready: ready} - elem := s.waiters.PushBack(w) - s.mu.Unlock() - - select { - case <-ctx.Done(): - err := ctx.Err() - s.mu.Lock() - select { - case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil - default: - isFront := s.waiters.Front() == elem - s.waiters.Remove(elem) - // If we're at the front and there're extra tokens left, notify other waiters. - if isFront && s.size > s.cur { - s.notifyWaiters() - } - } - s.mu.Unlock() - return err - - case <-ready: - return nil - } -} - -// TryAcquire acquires the semaphore with a weight of n without blocking. -// On success, returns true. On failure, returns false and leaves the semaphore unchanged. -func (s *Weighted) TryAcquire(n int64) bool { - s.mu.Lock() - success := s.size-s.cur >= n && s.waiters.Len() == 0 - if success { - s.cur += n - } - s.mu.Unlock() - return success -} - -// Release releases the semaphore with a weight of n. -func (s *Weighted) Release(n int64) { - s.mu.Lock() - s.cur -= n - if s.cur < 0 { - s.mu.Unlock() - panic("semaphore: released more than held") - } - s.notifyWaiters() - s.mu.Unlock() -} - -func (s *Weighted) notifyWaiters() { - for { - next := s.waiters.Front() - if next == nil { - break // No more waiters blocked. - } - - w := next.Value.(waiter) - if s.size-s.cur < w.n { - // Not enough tokens for the next waiter. We could keep going (to try to - // find a waiter with a smaller request), but under load that could cause - // starvation for large requests; instead, we leave all remaining waiters - // blocked. - // - // Consider a semaphore used as a read-write lock, with N tokens, N - // readers, and one writer. Each reader can Acquire(1) to obtain a read - // lock. The writer can Acquire(N) to obtain a write lock, excluding all - // of the readers. If we allow the readers to jump ahead in the queue, - // the writer will starve — there is always one token available for every - // reader. - break - } - - s.cur += w.n - s.waiters.Remove(next) - close(w.ready) - } -} diff --git a/src/runtime/vendor/golang.org/x/sys/windows/registry/key.go b/src/runtime/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 000000000..c25648343 --- /dev/null +++ b/src/runtime/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,198 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. 
+// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +// +package registry + +import ( + "io" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. 
+func (k Key) ReadSubKeyNames(n int) ([]string, error) { + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/src/runtime/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/src/runtime/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 000000000..50c32a3f4 --- /dev/null +++ b/src/runtime/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/src/runtime/vendor/golang.org/x/sys/windows/registry/syscall.go b/src/runtime/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 000000000..e66643cba --- /dev/null +++ b/src/runtime/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/src/runtime/vendor/golang.org/x/sys/windows/registry/value.go b/src/runtime/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 000000000..f25e7e97a --- /dev/null +++ b/src/runtime/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. 
If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. +func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/src/runtime/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/src/runtime/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 000000000..fc1835d8a --- /dev/null +++ b/src/runtime/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype 
*uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/src/runtime/vendor/golang.org/x/text/unicode/bidi/bidi.go b/src/runtime/vendor/golang.org/x/text/unicode/bidi/bidi.go index e8edc54cc..fd057601b 100644 --- a/src/runtime/vendor/golang.org/x/text/unicode/bidi/bidi.go +++ b/src/runtime/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -12,15 +12,14 @@ // and without notice. package bidi // import "golang.org/x/text/unicode/bidi" -// TODO: -// The following functionality would not be hard to implement, but hinges on -// the definition of a Segmenter interface. For now this is up to the user. -// - Iterate over paragraphs -// - Segmenter to iterate over runs directly from a given text. -// Also: +// TODO // - Transformer for reordering? // - Transformer (validator, really) for Bidi Rule. +import ( + "bytes" +) + // This API tries to avoid dealing with embedding levels for now. Under the hood // these will be computed, but the question is to which extent the user should // know they exist. We should at some point allow the user to specify an @@ -49,7 +48,9 @@ const ( Neutral ) -type options struct{} +type options struct { + defaultDirection Direction +} // An Option is an option for Bidi processing. type Option func(*options) @@ -66,12 +67,62 @@ type Option func(*options) // DefaultDirection sets the default direction for a Paragraph. The direction is // overridden if the text contains directional characters. func DefaultDirection(d Direction) Option { - panic("unimplemented") + return func(opts *options) { + opts.defaultDirection = d + } } // A Paragraph holds a single Paragraph for Bidi processing. type Paragraph struct { - // buffers + p []byte + o Ordering + opts []Option + types []Class + pairTypes []bracketType + pairValues []rune + runes []rune + options options +} + +// Initialize the p.pairTypes, p.pairValues and p.types from the input previously +// set by p.SetBytes() or p.SetString(). Also limit the input up to (and including) a paragraph +// separator (bidi class B). 
+// +// The function p.Order() needs these values to be set, so this preparation could be postponed. +// But since the SetBytes and SetStrings functions return the length of the input up to the paragraph +// separator, the whole input needs to be processed anyway and should not be done twice. +// +// The function has the same return values as SetBytes() / SetString() +func (p *Paragraph) prepareInput() (n int, err error) { + p.runes = bytes.Runes(p.p) + bytecount := 0 + // clear slices from previous SetString or SetBytes + p.pairTypes = nil + p.pairValues = nil + p.types = nil + + for _, r := range p.runes { + props, i := LookupRune(r) + bytecount += i + cls := props.Class() + if cls == B { + return bytecount, nil + } + p.types = append(p.types, cls) + if props.IsOpeningBracket() { + p.pairTypes = append(p.pairTypes, bpOpen) + p.pairValues = append(p.pairValues, r) + } else if props.IsBracket() { + // this must be a closing bracket, + // since IsOpeningBracket is not true + p.pairTypes = append(p.pairTypes, bpClose) + p.pairValues = append(p.pairValues, r) + } else { + p.pairTypes = append(p.pairTypes, bpNone) + p.pairValues = append(p.pairValues, 0) + } + } + return bytecount, nil } // SetBytes configures p for the given paragraph text. It replaces text @@ -80,70 +131,150 @@ type Paragraph struct { // consumed from b including this separator. Error may be non-nil if options are // given. func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) { - panic("unimplemented") + p.p = b + p.opts = opts + return p.prepareInput() } -// SetString configures p for the given paragraph text. It replaces text -// previously set by SetBytes or SetString. If b contains a paragraph separator +// SetString configures s for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If s contains a paragraph separator // it will only process the first paragraph and report the number of bytes -// consumed from b including this separator. Error may be non-nil if options are +// consumed from s including this separator. Error may be non-nil if options are // given. func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) { - panic("unimplemented") + p.p = []byte(s) + p.opts = opts + return p.prepareInput() } // IsLeftToRight reports whether the principle direction of rendering for this // paragraphs is left-to-right. If this returns false, the principle direction // of rendering is right-to-left. func (p *Paragraph) IsLeftToRight() bool { - panic("unimplemented") + return p.Direction() == LeftToRight } // Direction returns the direction of the text of this paragraph. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (p *Paragraph) Direction() Direction { - panic("unimplemented") + return p.o.Direction() } +// TODO: what happens if the position is > len(input)? This should return an error. + // RunAt reports the Run at the given position of the input text. // // This method can be used for computing line breaks on paragraphs. 
func (p *Paragraph) RunAt(pos int) Run { - panic("unimplemented") + c := 0 + runNumber := 0 + for i, r := range p.o.runes { + c += len(r) + if pos < c { + runNumber = i + } + } + return p.o.Run(runNumber) +} + +func calculateOrdering(levels []level, runes []rune) Ordering { + var curDir Direction + + prevDir := Neutral + prevI := 0 + + o := Ordering{} + // lvl = 0,2,4,...: left to right + // lvl = 1,3,5,...: right to left + for i, lvl := range levels { + if lvl%2 == 0 { + curDir = LeftToRight + } else { + curDir = RightToLeft + } + if curDir != prevDir { + if i > 0 { + o.runes = append(o.runes, runes[prevI:i]) + o.directions = append(o.directions, prevDir) + o.startpos = append(o.startpos, prevI) + } + prevI = i + prevDir = curDir + } + } + o.runes = append(o.runes, runes[prevI:]) + o.directions = append(o.directions, prevDir) + o.startpos = append(o.startpos, prevI) + return o } // Order computes the visual ordering of all the runs in a Paragraph. func (p *Paragraph) Order() (Ordering, error) { - panic("unimplemented") + if len(p.types) == 0 { + return Ordering{}, nil + } + + for _, fn := range p.opts { + fn(&p.options) + } + lvl := level(-1) + if p.options.defaultDirection == RightToLeft { + lvl = 1 + } + para, err := newParagraph(p.types, p.pairTypes, p.pairValues, lvl) + if err != nil { + return Ordering{}, err + } + + levels := para.getLevels([]int{len(p.types)}) + + p.o = calculateOrdering(levels, p.runes) + return p.o, nil } // Line computes the visual ordering of runs for a single line starting and // ending at the given positions in the original text. func (p *Paragraph) Line(start, end int) (Ordering, error) { - panic("unimplemented") + lineTypes := p.types[start:end] + para, err := newParagraph(lineTypes, p.pairTypes[start:end], p.pairValues[start:end], -1) + if err != nil { + return Ordering{}, err + } + levels := para.getLevels([]int{len(lineTypes)}) + o := calculateOrdering(levels, p.runes[start:end]) + return o, nil } // An Ordering holds the computed visual order of runs of a Paragraph. Calling // SetBytes or SetString on the originating Paragraph invalidates an Ordering. // The methods of an Ordering should only be called by one goroutine at a time. -type Ordering struct{} +type Ordering struct { + runes [][]rune + directions []Direction + startpos []int +} // Direction reports the directionality of the runs. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (o *Ordering) Direction() Direction { - panic("unimplemented") + return o.directions[0] } // NumRuns returns the number of runs. func (o *Ordering) NumRuns() int { - panic("unimplemented") + return len(o.runes) } // Run returns the ith run within the ordering. func (o *Ordering) Run(i int) Run { - panic("unimplemented") + r := Run{ + runes: o.runes[i], + direction: o.directions[i], + startpos: o.startpos[i], + } + return r } // TODO: perhaps with options. @@ -155,16 +286,19 @@ func (o *Ordering) Run(i int) Run { // A Run is a continuous sequence of characters of a single direction. type Run struct { + runes []rune + direction Direction + startpos int } // String returns the text of the run in its original order. func (r *Run) String() string { - panic("unimplemented") + return string(r.runes) } // Bytes returns the text of the run in its original order. func (r *Run) Bytes() []byte { - panic("unimplemented") + return []byte(r.String()) } // TODO: methods for @@ -174,25 +308,52 @@ func (r *Run) Bytes() []byte { // Direction reports the direction of the run. 
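// A consumer-side sketch of the Paragraph/Ordering/Run API implemented above;
// the sample text is arbitrary.

package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	var p bidi.Paragraph
	if _, err := p.SetString("Hello, שלום"); err != nil {
		panic(err)
	}
	o, err := p.Order()
	if err != nil {
		panic(err)
	}
	// Each run is a maximal sequence of characters with a single direction.
	for i := 0; i < o.NumRuns(); i++ {
		r := o.Run(i)
		fmt.Printf("run %d: direction=%v text=%q\n", i, r.Direction(), r.String())
	}
}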
func (r *Run) Direction() Direction { - panic("unimplemented") + return r.direction } -// Position of the Run within the text passed to SetBytes or SetString of the +// Pos returns the position of the Run within the text passed to SetBytes or SetString of the // originating Paragraph value. func (r *Run) Pos() (start, end int) { - panic("unimplemented") + return r.startpos, r.startpos + len(r.runes) - 1 } // AppendReverse reverses the order of characters of in, appends them to out, // and returns the result. Modifiers will still follow the runes they modify. // Brackets are replaced with their counterparts. func AppendReverse(out, in []byte) []byte { - panic("unimplemented") + ret := make([]byte, len(in)+len(out)) + copy(ret, out) + inRunes := bytes.Runes(in) + + for i, r := range inRunes { + prop, _ := LookupRune(r) + if prop.IsBracket() { + inRunes[i] = prop.reverseBracket(r) + } + } + + for i, j := 0, len(inRunes)-1; i < j; i, j = i+1, j-1 { + inRunes[i], inRunes[j] = inRunes[j], inRunes[i] + } + copy(ret[len(out):], string(inRunes)) + + return ret } // ReverseString reverses the order of characters in s and returns a new string. // Modifiers will still follow the runes they modify. Brackets are replaced with // their counterparts. func ReverseString(s string) string { - panic("unimplemented") + input := []rune(s) + li := len(input) + ret := make([]rune, li) + for i, r := range input { + prop, _ := LookupRune(r) + if prop.IsBracket() { + ret[li-i-1] = prop.reverseBracket(r) + } else { + ret[li-i-1] = r + } + } + return string(ret) } diff --git a/src/runtime/vendor/golang.org/x/text/unicode/bidi/core.go b/src/runtime/vendor/golang.org/x/text/unicode/bidi/core.go index 50deb6600..e4c081101 100644 --- a/src/runtime/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/src/runtime/vendor/golang.org/x/text/unicode/bidi/core.go @@ -4,7 +4,10 @@ package bidi -import "log" +import ( + "fmt" + "log" +) // This implementation is a port based on the reference implementation found at: // https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ @@ -97,13 +100,20 @@ type paragraph struct { // rune (suggested is the rune of the open bracket for opening and matching // close brackets, after normalization). The embedding levels are optional, but // may be supplied to encode embedding levels of styled text. -// -// TODO: return an error. 
-func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { - validateTypes(types) - validatePbTypes(pairTypes) - validatePbValues(pairValues, pairTypes) - validateParagraphEmbeddingLevel(levels) +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) (*paragraph, error) { + var err error + if err = validateTypes(types); err != nil { + return nil, err + } + if err = validatePbTypes(pairTypes); err != nil { + return nil, err + } + if err = validatePbValues(pairValues, pairTypes); err != nil { + return nil, err + } + if err = validateParagraphEmbeddingLevel(levels); err != nil { + return nil, err + } p := ¶graph{ initialTypes: append([]Class(nil), types...), @@ -115,7 +125,7 @@ func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, lev resultTypes: append([]Class(nil), types...), } p.run() - return p + return p, nil } func (p *paragraph) Len() int { return len(p.initialTypes) } @@ -1001,58 +1011,61 @@ func typeForLevel(level level) Class { return R } -// TODO: change validation to not panic - -func validateTypes(types []Class) { +func validateTypes(types []Class) error { if len(types) == 0 { - log.Panic("types is null") + return fmt.Errorf("types is null") } for i, t := range types[:len(types)-1] { if t == B { - log.Panicf("B type before end of paragraph at index: %d", i) + return fmt.Errorf("B type before end of paragraph at index: %d", i) } } + return nil } -func validateParagraphEmbeddingLevel(embeddingLevel level) { +func validateParagraphEmbeddingLevel(embeddingLevel level) error { if embeddingLevel != implicitLevel && embeddingLevel != 0 && embeddingLevel != 1 { - log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + return fmt.Errorf("illegal paragraph embedding level: %d", embeddingLevel) } + return nil } -func validateLineBreaks(linebreaks []int, textLength int) { +func validateLineBreaks(linebreaks []int, textLength int) error { prev := 0 for i, next := range linebreaks { if next <= prev { - log.Panicf("bad linebreak: %d at index: %d", next, i) + return fmt.Errorf("bad linebreak: %d at index: %d", next, i) } prev = next } if prev != textLength { - log.Panicf("last linebreak was %d, want %d", prev, textLength) + return fmt.Errorf("last linebreak was %d, want %d", prev, textLength) } + return nil } -func validatePbTypes(pairTypes []bracketType) { +func validatePbTypes(pairTypes []bracketType) error { if len(pairTypes) == 0 { - log.Panic("pairTypes is null") + return fmt.Errorf("pairTypes is null") } for i, pt := range pairTypes { switch pt { case bpNone, bpOpen, bpClose: default: - log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + return fmt.Errorf("illegal pairType value at %d: %v", i, pairTypes[i]) } } + return nil } -func validatePbValues(pairValues []rune, pairTypes []bracketType) { +func validatePbValues(pairValues []rune, pairTypes []bracketType) error { if pairValues == nil { - log.Panic("pairValues is null") + return fmt.Errorf("pairValues is null") } if len(pairTypes) != len(pairValues) { - log.Panic("pairTypes is different length from pairValues") + return fmt.Errorf("pairTypes is different length from pairValues") } + return nil } diff --git a/src/runtime/vendor/google.golang.org/api/AUTHORS b/src/runtime/vendor/google.golang.org/api/AUTHORS deleted file mode 100644 index f07029059..000000000 --- a/src/runtime/vendor/google.golang.org/api/AUTHORS +++ /dev/null @@ -1,11 +0,0 @@ -# This is the official list of authors for copyright purposes. 
-# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. -Google Inc. -LightStep Inc. diff --git a/src/runtime/vendor/google.golang.org/api/CONTRIBUTORS b/src/runtime/vendor/google.golang.org/api/CONTRIBUTORS deleted file mode 100644 index 788677b8f..000000000 --- a/src/runtime/vendor/google.golang.org/api/CONTRIBUTORS +++ /dev/null @@ -1,56 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# https://cla.developers.google.com/about/google-individual -# https://cla.developers.google.com/about/google-corporate -# -# The CLA can be filled out on the web: -# -# https://cla.developers.google.com/ -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name -# -# An entry with two email addresses specifies that the -# first address should be used in the submit logs and -# that the second address should be recognized as the -# same person when interacting with Rietveld. - -# Please keep the list sorted. - -Alain Vongsouvanhalainv -Andrew Gerrand -Brad Fitzpatrick -Eric Koleda -Francesc Campoy -Garrick Evans -Glenn Lewis -Ivan Krasin -Jason Hall -Johan Euphrosine -Kostik Shtoyk -Kunpei Sakai -Matthew Dolan -Matthew Whisenhunt -Michael McGreevy -Nick Craig-Wood -Robbie Trencheny -Ross Light -Sarah Adams -Scott Van Woudenberg -Takashi Matsuo diff --git a/src/runtime/vendor/google.golang.org/api/LICENSE b/src/runtime/vendor/google.golang.org/api/LICENSE deleted file mode 100644 index 263aa7a0c..000000000 --- a/src/runtime/vendor/google.golang.org/api/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/runtime/vendor/google.golang.org/api/support/bundler/bundler.go b/src/runtime/vendor/google.golang.org/api/support/bundler/bundler.go deleted file mode 100644 index 9f8237a03..000000000 --- a/src/runtime/vendor/google.golang.org/api/support/bundler/bundler.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bundler supports bundling (batching) of items. Bundling amortizes an -// action with fixed costs over multiple items. For example, if an API provides -// an RPC that accepts a list of items as input, but clients would prefer -// adding items one at a time, then a Bundler can accept individual items from -// the client and bundle many of them into a single RPC. -// -// This package is experimental and subject to change without notice. -package bundler - -import ( - "context" - "errors" - "reflect" - "sync" - "time" - - "golang.org/x/sync/semaphore" -) - -type mode int - -const ( - DefaultDelayThreshold = time.Second - DefaultBundleCountThreshold = 10 - DefaultBundleByteThreshold = 1e6 // 1M - DefaultBufferedByteLimit = 1e9 // 1G -) - -const ( - none mode = iota - add - addWait -) - -var ( - // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. - ErrOverflow = errors.New("bundler reached buffered byte limit") - - // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. - ErrOversizedItem = errors.New("item size exceeds bundle byte limit") - - // errMixedMethods indicates that mutually exclusive methods has been - // called subsequently. - errMixedMethods = errors.New("calls to Add and AddWait cannot be mixed") -) - -// A Bundler collects items added to it into a bundle until the bundle -// exceeds a given size, then calls a user-provided function to handle the -// bundle. -// -// The exported fields are only safe to modify prior to the first call to Add -// or AddWait. -type Bundler struct { - // Starting from the time that the first message is added to a bundle, once - // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. - DelayThreshold time.Duration - - // Once a bundle has this many items, handle the bundle. Since only one - // item at a time is added to a bundle, no bundle will exceed this - // threshold, so it also serves as a limit. The default is - // DefaultBundleCountThreshold. - BundleCountThreshold int - - // Once the number of bytes in current bundle reaches this threshold, handle - // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, - // but does not cap the total size of a bundle. - BundleByteThreshold int - - // The maximum size of a bundle, in bytes. Zero means unlimited. - BundleByteLimit int - - // The maximum number of bytes that the Bundler will keep in memory before - // returning ErrOverflow. The default is DefaultBufferedByteLimit. 
- BufferedByteLimit int - - // The maximum number of handler invocations that can be running at once. - // The default is 1. - HandlerLimit int - - handler func(interface{}) // called to handle a bundle - itemSliceZero reflect.Value // nil (zero value) for slice of items - - mu sync.Mutex // guards access to fields below - flushTimer *time.Timer // implements DelayThreshold - handlerCount int // # of bundles currently being handled (i.e. handler is invoked on them) - sem *semaphore.Weighted // enforces BufferedByteLimit - semOnce sync.Once // guards semaphore initialization - // The current bundle we're adding items to. Not yet in the queue. - // Appended to the queue once the flushTimer fires or the bundle - // thresholds/limits are reached. If curBundle is nil and tail is - // not, we first try to add items to tail. Once tail is full or handled, - // we create a new curBundle for the incoming item. - curBundle *bundle - // The next bundle in the queue to be handled. Nil if the queue is - // empty. - head *bundle - // The last bundle in the queue to be handled. Nil if the queue is - // empty. If curBundle is nil and tail isn't, we attempt to add new - // items to the tail until if becomes full or has been passed to the - // handler. - tail *bundle - curFlush *sync.WaitGroup // counts outstanding bundles since last flush - prevFlush chan bool // signal used to wait for prior flush - - // The first call to Add or AddWait, mode will be add or addWait respectively. - // If there wasn't call yet then mode is none. - mode mode - // TODO: consider alternative queue implementation for head/tail bundle. see: - // https://code-review.googlesource.com/c/google-api-go-client/+/47991/4/support/bundler/bundler.go#74 -} - -// A bundle is a group of items that were added individually and will be passed -// to a handler as a slice. -type bundle struct { - items reflect.Value // slice of T - size int // size in bytes of all items - next *bundle // bundles are handled in order as a linked list queue - flush *sync.WaitGroup // the counter that tracks flush completion -} - -// add appends item to this bundle and increments the total size. It requires -// that b.mu is locked. -func (bu *bundle) add(item interface{}, size int) { - bu.items = reflect.Append(bu.items, reflect.ValueOf(item)) - bu.size += size -} - -// NewBundler creates a new Bundler. -// -// itemExample is a value of the type that will be bundled. For example, if you -// want to create bundles of *Entry, you could pass &Entry{} for itemExample. -// -// handler is a function that will be called on each bundle. If itemExample is -// of type T, the argument to handler is of type []T. handler is always called -// sequentially for each bundle, and never in parallel. -// -// Configure the Bundler by setting its thresholds and limits before calling -// any of its methods. -func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { - b := &Bundler{ - DelayThreshold: DefaultDelayThreshold, - BundleCountThreshold: DefaultBundleCountThreshold, - BundleByteThreshold: DefaultBundleByteThreshold, - BufferedByteLimit: DefaultBufferedByteLimit, - HandlerLimit: 1, - - handler: handler, - itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), - curFlush: &sync.WaitGroup{}, - } - return b -} - -func (b *Bundler) initSemaphores() { - // Create the semaphores lazily, because the user may set limits - // after NewBundler. 
- b.semOnce.Do(func() { - b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) - }) -} - -// enqueueCurBundle moves curBundle to the end of the queue. The bundle may be -// handled immediately if we are below HandlerLimit. It requires that b.mu is -// locked. -func (b *Bundler) enqueueCurBundle() { - // We don't require callers to check if there is a pending bundle. It - // may have already been appended to the queue. If so, return early. - if b.curBundle == nil { - return - } - // If we are below the HandlerLimit, the queue must be empty. Handle - // immediately with a new goroutine. - if b.handlerCount < b.HandlerLimit { - b.handlerCount++ - go b.handle(b.curBundle) - } else if b.tail != nil { - // There are bundles on the queue, so append to the end - b.tail.next = b.curBundle - b.tail = b.curBundle - } else { - // The queue is empty, so initialize the queue - b.head = b.curBundle - b.tail = b.curBundle - } - b.curBundle = nil - if b.flushTimer != nil { - b.flushTimer.Stop() - b.flushTimer = nil - } -} - -// setMode sets the state of Bundler's mode. If mode was defined before -// and passed state is different from it then return an error. -func (b *Bundler) setMode(m mode) error { - b.mu.Lock() - defer b.mu.Unlock() - if b.mode == m || b.mode == none { - b.mode = m - return nil - } - return errMixedMethods -} - -// canFit returns true if bu can fit an additional item of size bytes based -// on the limits of Bundler b. -func (b *Bundler) canFit(bu *bundle, size int) bool { - return (b.BundleByteLimit <= 0 || bu.size+size <= b.BundleByteLimit) && - (b.BundleCountThreshold <= 0 || bu.items.Len() < b.BundleCountThreshold) -} - -// Add adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -// The type of item must be assignable to the itemExample parameter of the NewBundler -// method, otherwise there will be a panic. -// -// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then -// the item can never be handled. Add returns ErrOversizedItem in this case. -// -// If adding the item would exceed the maximum memory allowed -// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for -// memory, Add returns ErrOverflow. -// -// Add never blocks. -func (b *Bundler) Add(item interface{}, size int) error { - if err := b.setMode(add); err != nil { - return err - } - // If this item exceeds the maximum size of a bundle, - // we can never send it. - if b.BundleByteLimit > 0 && size > b.BundleByteLimit { - return ErrOversizedItem - } - - // If adding this item would exceed our allotted memory - // footprint, we can't accept it. - // (TryAcquire also returns false if anything is waiting on the semaphore, - // so calls to Add and AddWait shouldn't be mixed.) - b.initSemaphores() - if !b.sem.TryAcquire(int64(size)) { - return ErrOverflow - } - - b.mu.Lock() - defer b.mu.Unlock() - return b.add(item, size) -} - -// add adds item to the tail of the bundle queue or curBundle depending on space -// and nil-ness (see inline comments). It marks curBundle for handling (by -// appending it to the queue) if any of the thresholds or limits are exceeded. -// curBundle is lazily initialized. It requires that b.mu is locked. -func (b *Bundler) add(item interface{}, size int) error { - // If we don't have a curBundle, see if we can add to the queue tail. 
- if b.tail != nil && b.curBundle == nil && b.canFit(b.tail, size) { - b.tail.add(item, size) - return nil - } - - // If we can't fit in the existing curBundle, move it onto the queue. - if b.curBundle != nil && !b.canFit(b.curBundle, size) { - b.enqueueCurBundle() - } - - // Create a curBundle if we don't have one. - if b.curBundle == nil { - b.curFlush.Add(1) - b.curBundle = &bundle{ - items: b.itemSliceZero, - flush: b.curFlush, - } - } - - // Add the item. - b.curBundle.add(item, size) - - // If curBundle is ready for handling, move it to the queue. - if b.curBundle.size >= b.BundleByteThreshold || - b.curBundle.items.Len() == b.BundleCountThreshold { - b.enqueueCurBundle() - } - - // If we created a new bundle and it wasn't immediately handled, set a timer - if b.curBundle != nil && b.flushTimer == nil { - b.flushTimer = time.AfterFunc(b.DelayThreshold, b.tryHandleBundles) - } - - return nil -} - -// tryHandleBundles is the timer callback that handles or queues any current -// bundle after DelayThreshold time, even if the bundle isn't completely full. -func (b *Bundler) tryHandleBundles() { - b.mu.Lock() - b.enqueueCurBundle() - b.mu.Unlock() -} - -// next returns the next bundle that is ready for handling and removes it from -// the internal queue. It requires that b.mu is locked. -func (b *Bundler) next() *bundle { - if b.head == nil { - return nil - } - out := b.head - b.head = b.head.next - if b.head == nil { - b.tail = nil - } - out.next = nil - return out -} - -// handle calls the user-specified handler on the given bundle. handle is -// intended to be run as a goroutine. After the handler returns, we update the -// byte total. handle continues processing additional bundles that are ready. -// If no more bundles are ready, the handler count is decremented and the -// goroutine ends. -func (b *Bundler) handle(bu *bundle) { - for bu != nil { - b.handler(bu.items.Interface()) - bu = b.postHandle(bu) - } -} - -func (b *Bundler) postHandle(bu *bundle) *bundle { - b.mu.Lock() - defer b.mu.Unlock() - - b.sem.Release(int64(bu.size)) - bu.flush.Done() - - bu = b.next() - if bu == nil { - b.handlerCount-- - } - return bu -} - -// AddWait adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -// -// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then -// the item can never be handled. AddWait returns ErrOversizedItem in this case. -// -// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), -// AddWait blocks until space is available or ctx is done. -// -// Calls to Add and AddWait should not be mixed on the same Bundler. -func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { - if err := b.setMode(addWait); err != nil { - return err - } - // If this item exceeds the maximum size of a bundle, - // we can never send it. - if b.BundleByteLimit > 0 && size > b.BundleByteLimit { - return ErrOversizedItem - } - // If adding this item would exceed our allotted memory footprint, block - // until space is available. The semaphore is FIFO, so there will be no - // starvation. - b.initSemaphores() - if err := b.sem.Acquire(ctx, int64(size)); err != nil { - return err - } - - b.mu.Lock() - defer b.mu.Unlock() - return b.add(item, size) -} - -// Flush invokes the handler for all remaining items in the Bundler and waits -// for it to return. 
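// For reference, a minimal sketch of the bundler API being dropped from the
// vendor tree here: items added one at a time are handed to the handler in
// batches as a []T. The thresholds below are illustrative.

package main

import (
	"fmt"

	"google.golang.org/api/support/bundler"
)

func main() {
	b := bundler.NewBundler(int(0), func(items interface{}) {
		fmt.Println("bundle:", items.([]int))
	})
	b.BundleCountThreshold = 3 // hand a bundle to the handler after three items

	for i := 1; i <= 7; i++ {
		if err := b.Add(i, 1); err != nil { // size argument: 1 byte per item, for illustration
			panic(err)
		}
	}
	b.Flush() // handle any remaining items and wait for handlers to finish
}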
-func (b *Bundler) Flush() { - b.mu.Lock() - - // If a curBundle is pending, move it to the queue. - b.enqueueCurBundle() - - // Store a pointer to the WaitGroup that counts outstanding bundles - // in the current flush and create a new one to track the next flush. - wg := b.curFlush - b.curFlush = &sync.WaitGroup{} - - // Flush must wait for all prior, outstanding flushes to complete. - // We use a channel to communicate completion between each flush in - // the sequence. - prev := b.prevFlush - next := make(chan bool) - b.prevFlush = next - - b.mu.Unlock() - - // Wait until the previous flush is finished. - if prev != nil { - <-prev - } - - // Wait until this flush is finished. - wg.Wait() - - // Allow the next flush to finish. - close(next) -} diff --git a/src/runtime/vendor/google.golang.org/grpc/.travis.yml b/src/runtime/vendor/google.golang.org/grpc/.travis.yml index 3e495fa23..5847d94e5 100644 --- a/src/runtime/vendor/google.golang.org/grpc/.travis.yml +++ b/src/runtime/vendor/google.golang.org/grpc/.travis.yml @@ -35,7 +35,7 @@ install: script: - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - if [[ -n "${VET}" ]]; then ./vet.sh; fi - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi diff --git a/src/runtime/vendor/google.golang.org/grpc/Makefile b/src/runtime/vendor/google.golang.org/grpc/Makefile index 3f661a787..1f0722f16 100644 --- a/src/runtime/vendor/google.golang.org/grpc/Makefile +++ b/src/runtime/vendor/google.golang.org/grpc/Makefile @@ -1,13 +1,13 @@ all: vet test testrace -build: deps +build: go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: - go get -d -v google.golang.org/grpc/... + GO111MODULE=on go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ @@ -16,30 +16,18 @@ proto: fi go generate google.golang.org/grpc/... -test: testdeps +test: go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... -testsubmodule: testdeps +testsubmodule: cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps +testrace: go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... 
vet: vetdeps ./vet.sh @@ -51,14 +39,10 @@ vetdeps: all \ build \ clean \ - deps \ proto \ test \ testappengine \ testappenginedeps \ - testdeps \ testrace \ - updatedeps \ - updatetestdeps \ vet \ vetdeps diff --git a/src/runtime/vendor/google.golang.org/grpc/SECURITY.md b/src/runtime/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/src/runtime/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/src/runtime/vendor/google.golang.org/grpc/balancer/balancer.go b/src/runtime/vendor/google.golang.org/grpc/balancer/balancer.go index 8bf359dbf..788759bde 100644 --- a/src/runtime/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/src/runtime/vendor/google.golang.org/grpc/balancer/balancer.go @@ -174,6 +174,10 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) // ChannelzParentID is the entity parent's channelz unique identification number. ChannelzParentID int64 + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string // Target contains the parsed address info of the dial target. It is the same resolver.Target as // passed to the resolver. // See the documentation for the resolver.Target type for details about what it contains. diff --git a/src/runtime/vendor/google.golang.org/grpc/balancer/base/balancer.go b/src/runtime/vendor/google.golang.org/grpc/balancer/base/balancer.go index 32d782f1c..e0d34288c 100644 --- a/src/runtime/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/src/runtime/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -64,7 +64,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn + subConns map[resolver.Address]balancer.SubConn // `attributes` is stripped from the keys of this map (the addresses) scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -101,17 +101,41 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { - addrsSet[a] = struct{}{} - if _, ok := b.subConns[a]; !ok { + // Strip attributes from addresses before using them as map keys. So + // that when two addresses only differ in attributes pointers (but with + // the same attribute content), they are considered the same address. + // + // Note that this doesn't handle the case where the attribute content is + // different. So if users want to set different attributes to create + // duplicate connections to the same backend, it doesn't work. This is + // fine for now, because duplicate is done by setting Metadata today. + // + // TODO: read attributes to handle duplicate connections. + aNoAttrs := a + aNoAttrs.Attributes = nil + addrsSet[aNoAttrs] = struct{}{} + if sc, ok := b.subConns[aNoAttrs]; !ok { // a is a new address (not existing in b.subConns). + // + // When creating SubConn, the original address with attributes is + // passed through. 
So that connection configurations in attributes + // (like creds) will be used. sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[a] = sc + b.subConns[aNoAttrs] = sc b.scStates[sc] = connectivity.Idle sc.Connect() + } else { + // Always update the subconn's address in case the attributes + // changed. + // + // The SubConn does a reflect.DeepEqual of the new and old + // addresses. So this is a noop if the current address is the same + // as the old one (including attributes). + sc.UpdateAddresses([]resolver.Address{a}) } } for a, sc := range b.subConns { diff --git a/src/runtime/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/src/runtime/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index da0472967..ed75290cd 100644 --- a/src/runtime/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/src/runtime/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -19,17 +19,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 -// protoc v3.3.0 +// protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -243,7 +243,7 @@ type GrpcLogEntry struct { unknownFields protoimpl.UnknownFields // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate // from an unset value. // Each call may have several log entries, they will all have the same call_id. @@ -308,7 +308,7 @@ func (*GrpcLogEntry) Descriptor() ([]byte, []int) { return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} } -func (x *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { if x != nil { return x.Timestamp } @@ -439,7 +439,7 @@ type ClientHeader struct { // or : . 
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (x *ClientHeader) Reset() { @@ -495,7 +495,7 @@ func (x *ClientHeader) GetAuthority() string { return "" } -func (x *ClientHeader) GetTimeout() *duration.Duration { +func (x *ClientHeader) GetTimeout() *durationpb.Duration { if x != nil { return x.Timeout } @@ -1020,19 +1020,19 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ - (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType - (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger - (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type - (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry - (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader - (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader - (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer - (*Message)(nil), // 7: grpc.binarylog.v1.Message - (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata - (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry - (*Address)(nil), // 10: grpc.binarylog.v1.Address - (*timestamp.Timestamp)(nil), // 11: google.protobuf.Timestamp - (*duration.Duration)(nil), // 12: google.protobuf.Duration + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration } var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp diff --git a/src/runtime/vendor/google.golang.org/grpc/clientconn.go b/src/runtime/vendor/google.golang.org/grpc/clientconn.go index cbd671a85..77a08fd33 100644 --- a/src/runtime/vendor/google.golang.org/grpc/clientconn.go +++ b/src/runtime/vendor/google.golang.org/grpc/clientconn.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "math" - "net" "reflect" "strings" "sync" @@ -39,6 +38,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -48,6 +48,7 @@ import ( _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. 
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. ) const ( @@ -104,6 +105,17 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + // DialContext creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking @@ -191,16 +203,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - } - if cc.dopts.withProxy { - cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) - } - } - if cc.dopts.copts.UserAgent != "" { cc.dopts.copts.UserAgent += " " + grpcUA } else { @@ -234,6 +236,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) scSet = true } default: @@ -244,8 +247,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target) - unixScheme := strings.HasPrefix(cc.target, "unix:") + cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) if resolverBuilder == nil { @@ -268,8 +270,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.authority = creds.Info().ServerName } else if cc.dopts.insecure && cc.dopts.authority != "" { cc.authority = cc.dopts.authority - } else if unixScheme { + } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { cc.authority = "localhost" + } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { + cc.authority = "localhost" + cc.parsedTarget.Endpoint } else { // Use endpoint from "scheme://authority/endpoint" as the default // authority for ClientConn. 
@@ -282,6 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) } case <-ctx.Done(): return nil, ctx.Err() @@ -299,6 +304,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, + CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, } @@ -487,6 +493,8 @@ type ClientConn struct { balancerBuildOpts balancer.BuildOptions blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + mu sync.RWMutex resolverWrapper *ccResolverWrapper sc *ServiceConfig @@ -547,6 +555,7 @@ func (cc *ClientConn) scWatcher() { // TODO: load balance policy runtime change is ignored. // We may revisit this decision in the future. cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -585,13 +594,13 @@ func init() { func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) } } @@ -628,7 +637,15 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // default, per the error handling design? } else { if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { - cc.applyServiceConfigAndBalancer(sc, s.Addresses) + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState if cc.balancerWrapper == nil { @@ -638,6 +655,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } else { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) cc.blockingpicker.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) cc.mu.Unlock() @@ -872,6 +890,20 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + // GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. 
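// A standalone sketch of the lookup order getMethodConfig implements above:
// exact "/service/method" first, then the per-service default "/service/",
// then the global default "". Plain strings stand in for MethodConfig here.

package main

import (
	"fmt"
	"strings"
)

func lookup(methods map[string]string, method string) string {
	if m, ok := methods[method]; ok {
		return m // exact match
	}
	i := strings.LastIndex(method, "/")
	if m, ok := methods[method[:i+1]]; ok {
		return m // per-service default
	}
	return methods[""] // global default
}

func main() {
	methods := map[string]string{
		"/pkg.Svc/Get": "exact",
		"/pkg.Svc/":    "service default",
		"":             "global default",
	}
	fmt.Println(lookup(methods, "/pkg.Svc/Get"))  // exact
	fmt.Println(lookup(methods, "/pkg.Svc/List")) // service default
	fmt.Println(lookup(methods, "/other.Svc/Do")) // global default
}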
@@ -884,17 +916,7 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } - if m, ok := cc.sc.Methods[method]; ok { - return m - } - i := strings.LastIndex(method, "/") - if m, ok := cc.sc.Methods[method[:i+1]]; ok { - return m - } - return cc.sc.Methods[""] + return getMethodConfig(cc.sc, method) } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -917,12 +939,15 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st return t, done, nil } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { if sc == nil { // should never reach here. return } cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } if cc.sc.retryThrottling != nil { newThrottler := &retryThrottler{ diff --git a/src/runtime/vendor/google.golang.org/grpc/credentials/credentials.go b/src/runtime/vendor/google.golang.org/grpc/credentials/credentials.go index 02766443a..e69562e78 100644 --- a/src/runtime/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/src/runtime/vendor/google.golang.org/grpc/credentials/credentials.go @@ -58,9 +58,9 @@ type PerRPCCredentials interface { type SecurityLevel int const ( - // Invalid indicates an invalid security level. + // InvalidSecurityLevel indicates an invalid security level. // The zero SecurityLevel value is invalid for backward compatibility. - Invalid SecurityLevel = iota + InvalidSecurityLevel SecurityLevel = iota // NoSecurity indicates a connection is insecure. NoSecurity // IntegrityOnly indicates a connection only provides integrity protection. @@ -92,7 +92,7 @@ type CommonAuthInfo struct { } // GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. -func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } @@ -229,17 +229,16 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. -func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { type internalInfo interface { - GetCommonAuthInfo() *CommonAuthInfo + GetCommonAuthInfo() CommonAuthInfo } - ri, _ := RequestInfoFromContext(ctx) - if ri.AuthInfo == nil { - return errors.New("unable to obtain SecurityLevel from context") + if ai == nil { + return errors.New("AuthInfo is nil") } - if ci, ok := ri.AuthInfo.(internalInfo); ok { + if ci, ok := ai.(internalInfo); ok { // CommonAuthInfo.SecurityLevel has an invalid value. 
- if ci.GetCommonAuthInfo().SecurityLevel == Invalid { + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { return nil } if ci.GetCommonAuthInfo().SecurityLevel < level { diff --git a/src/runtime/vendor/google.golang.org/grpc/dialoptions.go b/src/runtime/vendor/google.golang.org/grpc/dialoptions.go index a93fcab8f..e7f86e6d7 100644 --- a/src/runtime/vendor/google.golang.org/grpc/dialoptions.go +++ b/src/runtime/vendor/google.golang.org/grpc/dialoptions.go @@ -71,7 +71,6 @@ type dialOptions struct { // we need to be able to configure this in tests. resolveNowBackoff func(int) time.Duration resolvers []resolver.Builder - withProxy bool } // DialOption configures how we set up the connection. @@ -325,7 +324,7 @@ func WithInsecure() DialOption { // later release. func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.withProxy = false + o.copts.UseProxy = false }) } @@ -595,9 +594,9 @@ func defaultDialOptions() dialOptions { copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + UseProxy: true, }, resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, - withProxy: true, } } diff --git a/src/runtime/vendor/google.golang.org/grpc/encoding/proto/proto.go b/src/runtime/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66b97a6f6..3009b35af 100644 --- a/src/runtime/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/src/runtime/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -21,8 +21,7 @@ package proto import ( - "math" - "sync" + "fmt" "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" @@ -38,73 +37,22 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) - } - return uint32(val) -} - -func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err - } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil -} - func (codec) Marshal(v interface{}) ([]byte, error) { - if pm, ok := v.(proto.Marshaler); ok { - // object can marshal itself, no need for buffer - return pm.Marshal() + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v interface{}) error { - protoMsg := v.(proto.Message) - protoMsg.Reset() - - if pu, ok := protoMsg.(proto.Unmarshaler); ok { - // object can unmarshal itself, no need for buffer - return pu.Unmarshal(data) + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - err := cb.Unmarshal(protoMsg) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err + return proto.Unmarshal(data, vv) } func (codec) Name() string { return Name } - -var protoBufferPool = &sync.Pool{ - New: func() interface{} { - return 
&cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, -} diff --git a/src/runtime/vendor/google.golang.org/grpc/go.mod b/src/runtime/vendor/google.golang.org/grpc/go.mod index d97276556..cab74e557 100644 --- a/src/runtime/vendor/google.golang.org/grpc/go.mod +++ b/src/runtime/vendor/google.golang.org/grpc/go.mod @@ -3,10 +3,10 @@ module google.golang.org/grpc go 1.11 require ( - github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f - github.com/envoyproxy/go-control-plane v0.9.4 + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.1 + github.com/golang/protobuf v1.4.2 github.com/google/go-cmp v0.5.0 github.com/google/uuid v1.1.2 golang.org/x/net v0.0.0-20190311183353-d8887717615a diff --git a/src/runtime/vendor/google.golang.org/grpc/go.sum b/src/runtime/vendor/google.golang.org/grpc/go.sum index 356a82e3d..77ee70b44 100644 --- a/src/runtime/vendor/google.golang.org/grpc/go.sum +++ b/src/runtime/vendor/google.golang.org/grpc/go.sum @@ -1,14 +1,19 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -21,18 +26,24 @@ 
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -63,7 +74,6 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -77,8 +87,13 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/src/runtime/vendor/google.golang.org/grpc/interceptor.go b/src/runtime/vendor/google.golang.org/grpc/interceptor.go index d1a609e48..668e0adcf 100644 --- a/src/runtime/vendor/google.golang.org/grpc/interceptor.go +++ b/src/runtime/vendor/google.golang.org/grpc/interceptor.go @@ -25,25 +25,41 @@ import ( // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. // -// Experimental +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. // -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. +// The returned error must be compatible with the status package. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// StreamClientInterceptor intercepts the creation of a ClientStream. 
Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. // -// Experimental +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. // -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/src/runtime/vendor/google.golang.org/grpc/internal/grpcutil/target.go index 80b33cdaf..8833021da 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -37,19 +37,53 @@ func split2(s, sep string) (string, string, bool) { } // ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. +// authority and endpoint. skipUnixColonParsing indicates that the parse should +// not parse "unix:[path]" cases. This should be true in cases where a custom +// dialer is present, to prevent a behavior change. // -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func ParseTarget(target string) (ret resolver.Target) { +// If target is not a valid scheme://authority/endpoint as specified in +// https://github.com/grpc/grpc/blob/master/doc/naming.md, +// it returns {Endpoint: target}. +func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { var ok bool + if strings.HasPrefix(target, "unix-abstract:") { + if strings.HasPrefix(target, "unix-abstract://") { + // Maybe, with Authority specified, try to parse it + var remain string + ret.Scheme, remain, _ = split2(target, "://") + ret.Authority, ret.Endpoint, ok = split2(remain, "/") + if !ok { + // No Authority, add the "//" back + ret.Endpoint = "//" + remain + } else { + // Found Authority, add the "/" back + ret.Endpoint = "/" + ret.Endpoint + } + } else { + // Without Authority specified, split target on ":" + ret.Scheme, ret.Endpoint, _ = split2(target, ":") + } + return ret + } ret.Scheme, ret.Endpoint, ok = split2(target, "://") if !ok { + if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { + // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, + // because splitting on :// only handles the + // "unix://[/absolute/path]" case. Only handle if the dialer is nil, + // to avoid a behavior change with custom dialers. 
+ return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} + } return resolver.Target{Endpoint: target} } ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") if !ok { return resolver.Target{Endpoint: target} } + if ret.Scheme == "unix" { + // Add the "/" back in the unix case, so the unix resolver receives the + // actual endpoint in the "unix://[/absolute/path]" case. + ret.Endpoint = "/" + ret.Endpoint + } return ret } diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/internal.go b/src/runtime/vendor/google.golang.org/grpc/internal/internal.go index 716d92800..1e2834c70 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/internal.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/internal.go @@ -57,6 +57,14 @@ var ( // bootstrap code while parsing certificate provider configs in the // bootstrap file. GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/src/runtime/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..302262613 --- /dev/null +++ b/src/runtime/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(metadata.MD) + return md +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. 
+func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(mdKey, md) + return addr +} diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/src/runtime/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..e69900400 --- /dev/null +++ b/src/runtime/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValues(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. 
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/src/runtime/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..0d5a811dd --- /dev/null +++ b/src/runtime/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + } + addr := resolver.Address{Addr: target.Endpoint} + if b.scheme == unixAbstractScheme { + // prepend "\x00" to address for unix-abstract + addr.Addr = "\x00" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/src/runtime/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index af3e2b5f7..bd4b8875f 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -22,8 +22,10 @@ package serviceconfig import ( "encoding/json" "fmt" + "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" externalserviceconfig "google.golang.org/grpc/serviceconfig" ) @@ -104,3 +106,57 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // case. return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") } + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). 
The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 2fdcb76e6..4b2964f2a 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -44,25 +44,23 @@ func GetCPUTime() int64 { } // Rusage is an alias for syscall.Rusage under linux environment. -type Rusage syscall.Rusage +type Rusage = syscall.Rusage // GetRusage returns the resource usage of current process. -func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. 
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec ) uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index adae60d65..7913ef1db 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -50,7 +50,7 @@ func GetCPUTime() int64 { type Rusage struct{} // GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { +func GetRusage() *Rusage { log() return nil } diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_client.go index e73b77a15..8902b7f90 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -33,6 +33,8 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -59,7 +61,7 @@ type http2Client struct { cancel context.CancelFunc ctxDone <-chan struct{} // Cache the ctx.Done() chan. userAgent string - md interface{} + md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter remoteAddr net.Addr @@ -137,11 +139,27 @@ type http2Client struct { connectionID uint64 } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) if fn != nil { - return fn(ctx, addr) + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // For backward compatibility, if the user dialed "unix:///path", + // the passthrough resolver would be used and the user's custom + // dialer would see "unix:///path". Since the unix resolver is used + // and the address is now "/path", prepend "unix://" so the user's + // custom dialer sees the same address. 
+ return fn(ctx, "unix://"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -172,7 +190,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -226,6 +244,18 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } isSecure = true if transportCreds.Info().SecurityProtocol == "tls" { scheme = "https" @@ -248,7 +278,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, - md: addr.Metadata, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -276,6 +305,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize @@ -481,25 +516,23 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for _, vv := range added { for i, v := range vv { if i%2 == 0 { - k = v + k = strings.ToLower(v) continue } // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } - headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - if md, ok := t.md.(*metadata.MD); ok { - for k, vv := range *md { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } return headerFields, nil @@ -549,8 +582,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. 
if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/transport/http_util.go b/src/runtime/vendor/google.golang.org/grpc/internal/transport/http_util.go index 4d15afbf7..7e41d1183 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -27,6 +27,7 @@ import ( "math" "net" "net/http" + "net/url" "strconv" "strings" "time" @@ -598,3 +599,31 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/src/runtime/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..96967428b --- /dev/null +++ b/src/runtime/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dailer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValues(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. 
+func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/src/runtime/vendor/google.golang.org/grpc/proxy.go b/src/runtime/vendor/google.golang.org/grpc/internal/transport/proxy.go similarity index 73% rename from src/runtime/vendor/google.golang.org/grpc/proxy.go rename to src/runtime/vendor/google.golang.org/grpc/internal/transport/proxy.go index f8f69bfb7..a662bf39a 100644 --- a/src/runtime/vendor/google.golang.org/grpc/proxy.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -16,13 +16,12 @@ * */ -package grpc +package transport import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "net" @@ -34,8 +33,6 @@ import ( const proxyAuthHeaderKey = "Proxy-Authorization" var ( - // errDisabled indicates that proxy is disabled for the address. - errDisabled = errors.New("proxy is disabled for the address") // The following variable will be overwritten in the tests. httpProxyFromEnvironment = http.ProxyFromEnvironment ) @@ -51,9 +48,6 @@ func mapAddress(ctx context.Context, address string) (*url.URL, error) { if err != nil { return nil, err } - if url == nil { - return nil, errDisabled - } return url, nil } @@ -76,7 +70,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -115,32 +109,28 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return &bufConn{Conn: conn, r: r}, nil } -// newProxyDialer returns a dialer that connects to proxy first if necessary. -// The returned dialer checks if a proxy is necessary, dial to the proxy with the -// provided dialer, does HTTP CONNECT handshake and returns the connection. -func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) - if err != nil { - if err != errDisabled { - return nil, err - } - newAddr = addr - } else { - newAddr = proxyURL.Host - } +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(ctx, addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } - conn, err = dialer(ctx, newAddr) - if err != nil { - return - } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) - } + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { return } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. 
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/transport/transport.go b/src/runtime/vendor/google.golang.org/grpc/internal/transport/transport.go index b74030a96..9c8f79cb4 100644 --- a/src/runtime/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/src/runtime/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -569,6 +569,8 @@ type ConnectOptions struct { ChannelzParentID int64 // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool } // NewClientTransport establishes the transport with the required ConnectOptions diff --git a/src/runtime/vendor/google.golang.org/grpc/regenerate.sh b/src/runtime/vendor/google.golang.org/grpc/regenerate.sh index 3443ad975..fc6725b89 100644 --- a/src/runtime/vendor/google.golang.org/grpc/regenerate.sh +++ b/src/runtime/vendor/google.golang.org/grpc/regenerate.sh @@ -40,24 +40,14 @@ echo "go install cmd/protoc-gen-go-grpc" echo "git clone https://github.com/grpc/grpc-proto" git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + # Pull in code.proto as a proto dependency mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto -# Pull in the following repos to build the MeshCA config proto. -ENVOY_API_REPOS=( - "https://github.com/envoyproxy/data-plane-api" - "https://github.com/cncf/udpa" - "https://github.com/envoyproxy/protoc-gen-validate" -) -for repo in ${ENVOY_API_REPOS[@]}; do - dirname=$(basename ${repo}) - mkdir -p ${WORKDIR}/${dirname} - echo "git clone ${repo}" - git clone --quiet ${repo} ${WORKDIR}/${dirname} -done - # Pull in the MeshCA service proto. mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" @@ -84,15 +74,15 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto - ${WORKDIR}/grpc-proto/grpc/tls/provider/meshca/experimental/config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto ) # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Menvoy/config/core/v3/config_source.proto=github.com/envoyproxy/go-control-plane/envoy/config/core/v3 +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -100,9 +90,7 @@ for src in ${SOURCES[@]}; do -I"." 
\ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ - -I${WORKDIR}/data-plane-api \ - -I${WORKDIR}/udpa \ - -I${WORKDIR}/protoc-gen-validate \ + -I${WORKDIR}/protobuf/src \ -I${WORKDIR}/istio \ ${src} done @@ -113,9 +101,7 @@ for src in ${LEGACY_SOURCES[@]}; do -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ - -I${WORKDIR}/data-plane-api \ - -I${WORKDIR}/udpa \ - -I${WORKDIR}/protoc-gen-validate \ + -I${WORKDIR}/protobuf/src \ -I${WORKDIR}/istio \ ${src} done @@ -132,6 +118,10 @@ rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config +# grpc/testing does not have a go_package option. +mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ + # istio/google/security/meshca/v1/meshca.proto does not have a go_package option. mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ diff --git a/src/runtime/vendor/google.golang.org/grpc/rpc_util.go b/src/runtime/vendor/google.golang.org/grpc/rpc_util.go index f0609f2a4..c0a1208f2 100644 --- a/src/runtime/vendor/google.golang.org/grpc/rpc_util.go +++ b/src/runtime/vendor/google.golang.org/grpc/rpc_util.go @@ -27,7 +27,6 @@ import ( "io" "io/ioutil" "math" - "net/url" "strings" "sync" "time" @@ -872,40 +871,6 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - // channelzData is used to store channelz related data for ClientConn, addrConn and Server. // These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. @@ -923,8 +888,7 @@ type channelzData struct { // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 7. // -// Older versions are kept for compatibility. They may be removed if -// compatibility cannot be maintained. +// Older versions are kept for compatibility. // // These constants should not be referenced from any other code. 
const ( diff --git a/src/runtime/vendor/google.golang.org/grpc/server.go b/src/runtime/vendor/google.golang.org/grpc/server.go index 968eb598e..7a2aa28a1 100644 --- a/src/runtime/vendor/google.golang.org/grpc/server.go +++ b/src/runtime/vendor/google.golang.org/grpc/server.go @@ -40,6 +40,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" @@ -58,6 +59,12 @@ const ( defaultServerMaxSendMessageSize = math.MaxInt32 ) +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } +} + var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") diff --git a/src/runtime/vendor/google.golang.org/grpc/service_config.go b/src/runtime/vendor/google.golang.org/grpc/service_config.go index 5e434ca7f..22c4240cf 100644 --- a/src/runtime/vendor/google.golang.org/grpc/service_config.go +++ b/src/runtime/vendor/google.golang.org/grpc/service_config.go @@ -41,29 +41,7 @@ const maxInt = int(^uint(0) >> 1) // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. - retryPolicy *retryPolicy -} +type MethodConfig = internalserviceconfig.MethodConfig type lbConfig struct { name string @@ -127,34 +105,6 @@ type healthCheckConfig struct { ServiceName string } -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoff). 
In general, the nth attempt will occur at - // random(0, - // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - type jsonRetryPolicy struct { MaxAttempts int InitialBackoff string @@ -313,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: d, } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -359,7 +309,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -381,19 +331,19 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { return nil, nil } - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: *ib, + MaxBackoff: *mb, + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.maxAttempts > 5 { + if rp.MaxAttempts > 5 { // TODO(retry): Make the max maxAttempts configurable. - rp.maxAttempts = 5 + rp.MaxAttempts = 5 } for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true + rp.RetryableStatusCodes[code] = true } return rp, nil } diff --git a/src/runtime/vendor/google.golang.org/grpc/status/status.go b/src/runtime/vendor/google.golang.org/grpc/status/status.go index 01e182c30..54d187186 100644 --- a/src/runtime/vendor/google.golang.org/grpc/status/status.go +++ b/src/runtime/vendor/google.golang.org/grpc/status/status.go @@ -73,9 +73,11 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. +// FromError returns a Status representing err if it was produced by this +// package or has a method `GRPCStatus() *Status`. +// If err is nil, a Status is returned with codes.OK and no message. +// Otherwise, ok is false and a Status is returned with codes.Unknown and +// the original error message. 
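To make the retry backoff in the hunks above concrete: the policy that used to live in the unexported retryPolicy struct (now internalserviceconfig.RetryPolicy) delays a retry by random(0, min(InitialBackoff*BackoffMultiplier^(n-1), MaxBackoff)), and convertRetryPolicy still caps MaxAttempts at 5. A minimal standalone sketch of that arithmetic, not gRPC's own code (the shouldRetry change in stream.go later in this diff performs the same computation with grpcrand):

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// retryDelay mirrors the documented formula:
// random(0, min(initialBackoff * multiplier^retries, maxBackoff)),
// where retries counts retries since the last server pushback.
func retryDelay(retries int, initial, max time.Duration, multiplier float64) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(retries))
	if m := float64(max); cur > m {
		cur = m
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	// Example policy: 100ms initial backoff, 1s cap, 2x multiplier.
	// MaxAttempts is capped at 5, i.e. the original call plus at most 4 retries.
	for n := 0; n < 4; n++ {
		fmt.Printf("retry %d: wait %v\n", n+1, retryDelay(n, 100*time.Millisecond, time.Second, 2))
	}
}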
func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/src/runtime/vendor/google.golang.org/grpc/stream.go b/src/runtime/vendor/google.golang.org/grpc/stream.go index 5fd856a38..eda1248d6 100644 --- a/src/runtime/vendor/google.golang.org/grpc/stream.go +++ b/src/runtime/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,8 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -170,7 +172,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if err := cc.waitForResolvedAddrs(ctx); err != nil { return nil, err } - mc := cc.GetMethodConfig(method) + + var mc serviceconfig.MethodConfig + var onCommit func() + rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method}) + if err != nil { + return nil, status.Convert(err).Err() + } + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + } + if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -272,6 +288,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cancel: cancel, beginTime: beginTime, firstAttempt: true, + onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) @@ -432,7 +449,8 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? + committed bool // active attempt committed for retry? 
+ onCommit func() buffer []func(a *csAttempt) error // operations to replay on retry bufferSize int // current size of buffer } @@ -461,6 +479,9 @@ type csAttempt struct { } func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } cs.committed = true cs.buffer = nil } @@ -539,8 +560,8 @@ func (cs *clientStream) shouldRetry(err error) error { code = status.Convert(err).Code() } - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { return err } @@ -549,7 +570,7 @@ func (cs *clientStream) shouldRetry(err error) error { if cs.retryThrottler.throttle() { return err } - if cs.numRetries+1 >= rp.maxAttempts { + if cs.numRetries+1 >= rp.MaxAttempts { return err } @@ -558,9 +579,9 @@ func (cs *clientStream) shouldRetry(err error) error { dur = time.Millisecond * time.Duration(pushback) cs.numRetriesSincePushback = 0 } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { cur = max } dur = time.Duration(grpcrand.Int63n(int64(cur))) diff --git a/src/runtime/vendor/google.golang.org/grpc/version.go b/src/runtime/vendor/google.golang.org/grpc/version.go index 144b0bf9e..51024d6b3 100644 --- a/src/runtime/vendor/google.golang.org/grpc/version.go +++ b/src/runtime/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.33.2" +const Version = "1.36.0" diff --git a/src/runtime/vendor/google.golang.org/grpc/vet.sh b/src/runtime/vendor/google.golang.org/grpc/vet.sh index 48652f604..b41df6dc8 100644 --- a/src/runtime/vendor/google.golang.org/grpc/vet.sh +++ b/src/runtime/vendor/google.golang.org/grpc/vet.sh @@ -53,7 +53,7 @@ if [[ "$1" = "-install" ]]; then fi if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 + PROTOBUF_VERSION=3.14.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/travis wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -61,7 +61,7 @@ if [[ "$1" = "-install" ]]; then bin/protoc --version popd elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 + PROTOBUF_VERSION=3.14.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} diff --git a/src/runtime/vendor/modules.txt b/src/runtime/vendor/modules.txt index b96c3cbe9..61698559f 100644 --- a/src/runtime/vendor/modules.txt +++ b/src/runtime/vendor/modules.txt @@ -31,8 +31,6 @@ github.com/Microsoft/hcsshim/osversion github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 github.com/PuerkitoBio/urlesc -# github.com/apache/thrift v0.13.0 -github.com/apache/thrift/lib/go/thrift # github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a github.com/asaskevich/govalidator # github.com/beorn7/perks v1.0.1 @@ -247,7 +245,7 @@ github.com/sirupsen/logrus github.com/sirupsen/logrus/hooks/syslog # github.com/smartystreets/goconvey v1.6.4 ## explicit -# github.com/stretchr/testify v1.6.1 +# 
github.com/stretchr/testify v1.7.0 ## explicit github.com/stretchr/testify/assert # github.com/urfave/cli v1.22.2 @@ -260,40 +258,40 @@ github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae ## explicit github.com/vishvananda/netns -# go.opencensus.io v0.22.4 +# go.opencensus.io v0.23.0 +## explicit go.opencensus.io go.opencensus.io/internal go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate -# go.opentelemetry.io/otel v0.15.0 +# go.opentelemetry.io/otel v1.0.0 ## explicit go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global -go.opentelemetry.io/otel/internal/trace/noop -go.opentelemetry.io/otel/internal/trace/parent -go.opentelemetry.io/otel/label -go.opentelemetry.io/otel/metric -go.opentelemetry.io/otel/metric/number -go.opentelemetry.io/otel/metric/registry go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv -go.opentelemetry.io/otel/trace -go.opentelemetry.io/otel/unit -# go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0 +go.opentelemetry.io/otel/semconv/v1.4.0 +# go.opentelemetry.io/otel/exporters/jaeger v1.0.0 ## explicit -go.opentelemetry.io/otel/exporters/trace/jaeger -go.opentelemetry.io/otel/exporters/trace/jaeger/internal/gen-go/jaeger -# go.opentelemetry.io/otel/sdk v0.15.0 +go.opentelemetry.io/otel/exporters/jaeger +go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent +go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger +go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore +go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift +# go.opentelemetry.io/otel/sdk v1.0.0 ## explicit -go.opentelemetry.io/otel/sdk/export/trace go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace +# go.opentelemetry.io/otel/trace v1.0.0 +## explicit +go.opentelemetry.io/otel/trace # golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 ## explicit golang.org/x/net/context @@ -304,26 +302,25 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 +# golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 ## explicit golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a -golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 ## explicit golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.4 +golang.org/x/sys/windows/registry +# golang.org/x/text v0.3.5 +## explicit golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# google.golang.org/api v0.32.0 -google.golang.org/api/support/bundler -# google.golang.org/appengine v1.6.6 +# google.golang.org/appengine v1.6.7 +## explicit google.golang.org/appengine/internal google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore @@ -331,9 +328,10 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# 
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a => google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 +# google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb => google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 +## explicit google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.33.2 +# google.golang.org/grpc v1.36.0 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -361,12 +359,16 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer diff --git a/src/runtime/virtcontainers/mount.go b/src/runtime/virtcontainers/mount.go index 1ca4f90f3..f12f45067 100644 --- a/src/runtime/virtcontainers/mount.go +++ b/src/runtime/virtcontainers/mount.go @@ -19,7 +19,7 @@ import ( "github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils" "github.com/sirupsen/logrus" - otelLabel "go.opentelemetry.io/otel/label" + otelLabel "go.opentelemetry.io/otel/attribute" ) // DefaultShmSize is the default shm size to be used in case host diff --git a/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go b/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go index 86a1943ce..2cc976128 100644 --- a/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go +++ b/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go @@ -21,7 +21,7 @@ import ( "github.com/mdlayher/vsock" "github.com/sirupsen/logrus" "go.opentelemetry.io/otel" - otelLabel "go.opentelemetry.io/otel/label" + otelLabel "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -129,7 +129,7 @@ func TraceUnaryClientInterceptor() ttrpc.UnaryClientInterceptor { } // err can be nil, that will return an OK response code if status, _ := status.FromError(err); status != nil { - span.SetAttributes(otelLabel.Key("RPC_CODE").Uint((uint)(status.Code()))) + span.SetAttributes(otelLabel.Key("RPC_CODE").Int((int)(status.Code()))) span.SetAttributes(otelLabel.Key("RPC_MESSAGE").String(status.Message())) } @@ -153,6 +153,15 @@ func (s *metadataSupplier) Set(key string, value string) { s.metadata.Set(key, value) } +// Required to satisfy Opentelemetry TextMapCarrier interface +func (s *metadataSupplier) Keys() []string { + keys := make([]string, 0, len(*s.metadata)) + for k := range *s.metadata { + keys = append(keys, k) + } + return keys +} + func inject(ctx context.Context, metadata *ttrpc.MD) { otel.GetTextMapPropagator().Inject(ctx, &metadataSupplier{ metadata: metadata,
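For context on the metadataSupplier.Keys() addition in client.go above: in OpenTelemetry 1.0 the propagation.TextMapCarrier interface requires Get, Set and Keys, so any custom carrier has to provide a Keys method. A minimal standalone sketch of a carrier used with the W3C trace-context propagator (toy mapCarrier type, not part of the patch):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

// mapCarrier is a toy analogue of metadataSupplier: it satisfies
// propagation.TextMapCarrier by providing Get, Set and Keys.
type mapCarrier map[string]string

func (c mapCarrier) Get(key string) string { return c[key] }
func (c mapCarrier) Set(key, value string) { c[key] = value }
func (c mapCarrier) Keys() []string {
	keys := make([]string, 0, len(c))
	for k := range c {
		keys = append(keys, k)
	}
	return keys
}

func main() {
	prop := propagation.TraceContext{} // W3C trace-context propagator
	carrier := mapCarrier{}

	// With no recording span in the context nothing is injected here; in the
	// runtime the ttrpc interceptor calls Inject from inside an active span,
	// so the traceparent header lands in the request metadata.
	prop.Inject(context.Background(), carrier)
	_ = prop.Extract(context.Background(), carrier) // what the receiving side does
	fmt.Println(carrier.Keys())
}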
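And one usage note for the status.FromError doc-comment change earlier in this diff: ok reports whether err already carried a gRPC status, and a nil error is treated as codes.OK. A short standalone example:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Stand-in for an error returned by an RPC.
	err := status.Error(codes.NotFound, "no such sandbox")
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message()) // NotFound no such sandbox
	}

	// A nil error maps to an OK status, as the updated comment spells out.
	s, ok := status.FromError(nil)
	fmt.Println(s.Code(), ok) // OK true
}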