diff --git a/go.mod b/go.mod index b393d5fd8..3cde919ba 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,9 @@ go 1.25.7 require ( github.com/BurntSushi/toml v1.6.0 - github.com/cilium/ebpf v0.20.0 + github.com/cilium/ebpf v0.21.0 github.com/containers/image/v5 v5.36.2 - github.com/distribution/distribution/v3 v3.0.0 + github.com/distribution/distribution/v3 v3.1.0 github.com/go-logr/logr v1.4.3 github.com/go-logr/zapr v1.3.0 github.com/google/uuid v1.6.0 @@ -17,14 +17,14 @@ require ( github.com/opencontainers/image-spec v1.1.1 github.com/openshift/api v0.0.0-20260218112846-22c744831738 github.com/openshift/library-go v0.0.0-20260223145824-7b234b47a906 - github.com/panjf2000/ants/v2 v2.11.5 + github.com/panjf2000/ants/v2 v2.12.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 github.com/prometheus/client_golang v1.23.2 go.uber.org/zap v1.27.1 - golang.org/x/crypto v0.48.0 - golang.org/x/sys v0.41.0 - golang.org/x/time v0.14.0 - google.golang.org/grpc v1.79.1 + golang.org/x/crypto v0.50.0 + golang.org/x/sys v0.43.0 + golang.org/x/time v0.15.0 + google.golang.org/grpc v1.80.0 k8s.io/api v0.34.1 k8s.io/apimachinery v0.34.1 k8s.io/client-go v0.34.1 @@ -66,38 +66,38 @@ require ( github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.1 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.22.4 // indirect - github.com/go-openapi/jsonreference v0.21.4 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // 
indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-openapi/jsonpointer v0.22.5 // indirect + github.com/go-openapi/jsonreference v0.21.5 // indirect + github.com/go-openapi/swag v0.25.5 // indirect + github.com/go-openapi/swag/cmdutils v0.25.5 // indirect + github.com/go-openapi/swag/conv v0.25.5 // indirect + github.com/go-openapi/swag/fileutils v0.25.5 // indirect + github.com/go-openapi/swag/jsonname v0.25.5 // indirect + github.com/go-openapi/swag/jsonutils v0.25.5 // indirect + github.com/go-openapi/swag/loading v0.25.5 // indirect + github.com/go-openapi/swag/mangling v0.25.5 // indirect + github.com/go-openapi/swag/netutils v0.25.5 // indirect + github.com/go-openapi/swag/stringutils v0.25.5 // indirect + github.com/go-openapi/swag/typeutils v0.25.5 // indirect + github.com/go-openapi/swag/yamlutils v0.25.5 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.27.0 // indirect + github.com/google/cel-go v0.28.0 // indirect github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-containerregistry v0.21.1 // indirect - github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect + github.com/google/go-containerregistry v0.21.5 // indirect + github.com/google/pprof v0.0.0-20260402051712-545e8a4df936 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect 
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.4 // indirect + github.com/klauspost/compress v1.18.5 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect @@ -113,57 +113,57 @@ require ( github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.5 // indirect github.com/prometheus/otlptranslator v1.0.0 // indirect - github.com/prometheus/procfs v0.20.0 // indirect + github.com/prometheus/procfs v0.20.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect github.com/redis/go-redis/v9 v9.18.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect github.com/sigstore/fulcio v1.8.5 // indirect - github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/sigstore v1.10.4 // indirect + github.com/sigstore/protobuf-specs v0.5.1 // indirect + github.com/sigstore/sigstore v1.10.5 // indirect github.com/sirupsen/logrus v1.9.4 // indirect github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 // indirect - go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect - go.opentelemetry.io/otel v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.62.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 // indirect - go.opentelemetry.io/otel/log v0.16.0 // indirect - go.opentelemetry.io/otel/metric v1.40.0 // indirect - go.opentelemetry.io/otel/sdk v1.40.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.16.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect - go.opentelemetry.io/otel/trace v1.40.0 // indirect - go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.68.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.68.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.65.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0 // indirect + 
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 // indirect + go.opentelemetry.io/otel/log v0.19.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.19.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect + go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa // indirect - golang.org/x/mod v0.33.0 // indirect - golang.org/x/net v0.51.0 // indirect - golang.org/x/oauth2 v0.35.0 // indirect - golang.org/x/sync v0.19.0 // indirect - golang.org/x/term v0.40.0 // indirect - golang.org/x/text v0.34.0 // indirect - golang.org/x/tools v0.42.0 // indirect + golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect + golang.org/x/mod v0.35.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/oauth2 v0.36.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/term v0.42.0 // indirect + golang.org/x/text v0.36.0 // indirect + golang.org/x/tools v0.44.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260226221140-a57be14db171 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -173,7 +173,7 @@ require ( 
k8s.io/apiserver v0.34.1 // indirect k8s.io/component-base v0.34.1 // indirect k8s.io/kube-aggregator v0.34.1 // indirect - k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 // indirect + k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect diff --git a/go.sum b/go.sum index f545c9f8c..e2b062f85 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alicebob/miniredis/v2 v2.35.0 h1:QwLphYqCEAo1eu1TqPRN2jgVMPBweeQcR21jeqDCONI= +github.com/alicebob/miniredis/v2 v2.35.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -35,8 +37,8 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= -github.com/cilium/ebpf v0.20.0 h1:atwWj9d3NffHyPZzVlx3hmw1on5CLe9eljR8VuHTwhM= -github.com/cilium/ebpf v0.20.0/go.mod 
h1:pzLjFymM+uZPLk/IXZUL63xdx5VXEo+enTzxkZXdycw= +github.com/cilium/ebpf v0.21.0 h1:4dpx1J/B/1apeTmWBH5BkVLayHTkFrMovVPnHEk+l3k= +github.com/cilium/ebpf v0.21.0/go.mod h1:1kHKv6Kvh5a6TePP5vvvoMa1bclRyzUXELSs272fmIQ= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -60,12 +62,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= -github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/distribution/v3 v3.1.0 h1:u1v788HreKTLGdNY6s7px8Exgrs9mZ9UrCDjSrpCM8g= +github.com/distribution/distribution/v3 v3.1.0/go.mod h1:73BuF5/ziMHNVt7nnL1roYpH4Eg/FgUlKZm3WryIx/o= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg= -github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.4.0+incompatible h1:+IjXULMetlvWJiuSI0Nbor36lcJ5BTcVpUmB21KBoVM= +github.com/docker/cli v29.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= 
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= @@ -90,8 +92,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fxamacker/cbor/v2 v2.9.1 h1:2rWm8B193Ll4VdjsJY28jxs70IdDsHRWgQYAI80+rMQ= +github.com/fxamacker/cbor/v2 v2.9.1/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= @@ -110,40 +112,40 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= -github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= -github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= -github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= 
-github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= -github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils 
v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= -github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA= +github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0= +github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE= +github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw= +github.com/go-openapi/swag v0.25.5 h1:pNkwbUEeGwMtcgxDr+2GBPAk4kT+kJ+AaB+TMKAg+TU= +github.com/go-openapi/swag v0.25.5/go.mod h1:B3RT6l8q7X803JRxa2e59tHOiZlX1t8viplOcs9CwTA= +github.com/go-openapi/swag/cmdutils v0.25.5 h1:yh5hHrpgsw4NwM9KAEtaDTXILYzdXh/I8Whhx9hKj7c= +github.com/go-openapi/swag/cmdutils v0.25.5/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.5 h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g= +github.com/go-openapi/swag/conv v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k= +github.com/go-openapi/swag/fileutils v0.25.5 h1:B6JTdOcs2c0dBIs9HnkyTW+5gC+8NIhVBUwERkFhMWk= +github.com/go-openapi/swag/fileutils v0.25.5/go.mod h1:V3cT9UdMQIaH4WiTrUc9EPtVA4txS0TOmRURmhGF4kc= 
+github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo= +github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU= +github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo= +github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5 h1:SX6sE4FrGb4sEnnxbFL/25yZBb5Hcg1inLeErd86Y1U= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5/go.mod h1:/2KvOTrKWjVA5Xli3DZWdMCZDzz3uV/T7bXwrKWPquo= +github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU= +github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g= +github.com/go-openapi/swag/mangling v0.25.5 h1:hyrnvbQRS7vKePQPHHDso+k6CGn5ZBs5232UqWZmJZw= +github.com/go-openapi/swag/mangling v0.25.5/go.mod h1:6hadXM/o312N/h98RwByLg088U61TPGiltQn71Iw0NY= +github.com/go-openapi/swag/netutils v0.25.5 h1:LZq2Xc2QI8+7838elRAaPCeqJnHODfSyOa7ZGfxDKlU= +github.com/go-openapi/swag/netutils v0.25.5/go.mod h1:lHbtmj4m57APG/8H7ZcMMSWzNqIQcu0RFiXrPUara14= +github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M= +github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII= +github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E= +github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc= +github.com/go-openapi/swag/yamlutils v0.25.5 h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ= +github.com/go-openapi/swag/yamlutils v0.25.5/go.mod h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.0 h1:7SgOMTvJkM8yWrQlU8Jm18VeDPuAvB/xWrdxFJkoFag= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.0/go.mod h1:14iV8jyyQlinc9StD7w1xVPW3CO3q1Gj04Jy//Kw4VM= 
+github.com/go-openapi/testify/v2 v2.4.0 h1:8nsPrHVCWkQ4p8h1EsRVymA2XABB4OT40gcvAu+voFM= +github.com/go-openapi/testify/v2 v2.4.0/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s= github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -161,20 +163,20 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= -github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= +github.com/google/cel-go v0.28.0 h1:KjSWstCpz/MN5t4a8gnGJNIYUsJRpdi/r97xWDphIQc= +github.com/google/cel-go v0.28.0/go.mod h1:X0bD6iVNR8pkROSOoHVdgTkzmRcosof7WQqCD6wcMc8= github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.21.1 h1:sOt/o9BS2b87FnR7wxXPvRKU1XVJn2QCwOS5g8zQXlc= -github.com/google/go-containerregistry v0.21.1/go.mod h1:ctO5aCaewH4AK1AumSF5DPW+0+R+d2FmylMJdp5G7p0= +github.com/google/go-containerregistry v0.21.5 h1:KTJG9Pn/jC0VdZR6ctV3/jcN+q6/Iqlx0sTVz3ywZlM= +github.com/google/go-containerregistry v0.21.5/go.mod 
h1:ySvMuiWg+dOsRW0Hw8GYwfMwBlNRTmpYBFJPlkco5zU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno= -github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= +github.com/google/pprof v0.0.0-20260402051712-545e8a4df936 h1:EwtI+Al+DeppwYX2oXJCETMO23COyaKGP6fHVpkpWpg= +github.com/google/pprof v0.0.0-20260402051712-545e8a4df936/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -202,8 +204,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= -github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= @@ 
-223,8 +225,8 @@ github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRH github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= -github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= +github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk= @@ -262,8 +264,8 @@ github.com/openshift/client-go v0.0.0-20260219131751-7e63ce155298 h1:V8uz/2Z4hh+ github.com/openshift/client-go v0.0.0-20260219131751-7e63ce155298/go.mod h1:rtH0BhilT6+jn3nWybANEumaBO1vWCKaY8QpwipRy/Y= github.com/openshift/library-go v0.0.0-20260223145824-7b234b47a906 h1:PkG3CmlU8+HtlW1rspnhwhbKki8rrwYN+L26aH11t2E= github.com/openshift/library-go v0.0.0-20260223145824-7b234b47a906/go.mod h1:K3FoNLgNBFYbFuG+Kr8usAnQxj1w84XogyUp2M8rK8k= -github.com/panjf2000/ants/v2 v2.11.5 h1:a7LMnMEeux/ebqTux140tRiaqcFTV0q2bEHF03nl6Rg= -github.com/panjf2000/ants/v2 v2.11.5/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= +github.com/panjf2000/ants/v2 v2.12.0 h1:u9JhESo83i/GkZnhfTNuFMMWcNt7mnV1bGJ6FT4wXH8= +github.com/panjf2000/ants/v2 v2.12.0/go.mod h1:tSQuaNQ6r6NRhPt+IZVUevvDyFMTs+eS4ztZc52uJTY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors 
v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -292,8 +294,8 @@ github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVR github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.20.0 h1:AA7aCvjxwAquZAlonN7888f2u4IN8WVeFgBi4k82M4Q= -github.com/prometheus/procfs v0.20.0/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= +github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc= +github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo= github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= @@ -311,10 +313,10 @@ github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sigstore/fulcio v1.8.5 h1:HYTD1/L5wlBp8JxsWxUf8hmfaNBBF/x3r3p5l6tZwbA= github.com/sigstore/fulcio v1.8.5/go.mod h1:tSLYK3JsKvJpDW1BsIsVHZgHj+f8TjXARzqIUWSsSPQ= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE= -github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI= +github.com/sigstore/protobuf-specs v0.5.1 h1:/5OPaNuolRJmQfeZLayJGFXMpsRJEdgC6ah1/+7Px7U= +github.com/sigstore/protobuf-specs v0.5.1/go.mod 
h1:DRBzpFuE+LnvQMN10/dU6nBeKwVLGEQ6o2FovN2Rats= +github.com/sigstore/sigstore v1.10.5 h1:KqrOjDhNOVY+uOzQFat2FrGLClPPCb3uz8pK3wuI+ow= +github.com/sigstore/sigstore v1.10.5/go.mod h1:k/mcVVXw3I87dYG/iCVTSW2xTrW7vPzxxGic4KqsqXs= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= @@ -351,56 +353,58 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 h1:I/7S/yWobR3QHFLqHsJ8QOndoiFsj1VgHpQiq43KlUI= -go.opentelemetry.io/contrib/bridges/prometheus v0.65.0/go.mod h1:jPF6gn3y1E+nozCAEQj3c6NZ8KY+tvAgSVfvoOJUFac= -go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 h1:2gApdml7SznX9szEKFjKjM4qGcGSvAybYLBY319XG3g= -go.opentelemetry.io/contrib/exporters/autoexport v0.65.0/go.mod h1:0QqAGlbHXhmPYACG3n5hNzO5DnEqqtg4VcK5pr22RI0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod 
h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= -go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= -go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 h1:ZVg+kCXxd9LtAaQNKBxAvJ5NpMf7LpvEr4MIZqb0TMQ= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0/go.mod h1:hh0tMeZ75CCXrHd9OXRYxTlCAdxcXioWHFIpYw2rZu8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 h1:djrxvDxAe44mJUrKataUbOhCKhR3F8QCyWucO16hTQs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0/go.mod h1:dt3nxpQEiSoKvfTVxp3TUg5fHPLhKtbcnN3Z1I1ePD0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 h1:9y5sHvAxWzft1WQ4BwqcvA+IFVUJ1Ya75mSAUnFEVwE= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0/go.mod h1:eQqT90eR3X5Dbs1g9YSM30RavwLF725Ris5/XSXWvqE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= -go.opentelemetry.io/otel/exporters/prometheus v0.62.0 
h1:krvC4JMfIOVdEuNPTtQ0ZjCiXrybhv+uOHMfHRmnvVo= -go.opentelemetry.io/otel/exporters/prometheus v0.62.0/go.mod h1:fgOE6FM/swEnsVQCqCnbOfRV4tOnWPg7bVeo4izBuhQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 h1:ivlbaajBWJqhcCPniDqDJmRwj4lc6sRT+dCAVKNmxlQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0/go.mod h1:u/G56dEKDDwXNCVLsbSrllB2o8pbtFLUC4HpR66r2dc= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= -go.opentelemetry.io/otel/log v0.16.0 h1:DeuBPqCi6pQwtCK0pO4fvMB5eBq6sNxEnuTs88pjsN4= -go.opentelemetry.io/otel/log v0.16.0/go.mod h1:rWsmqNVTLIA8UnwYVOItjyEZDbKIkMxdQunsIhpUMes= -go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= -go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/log v0.16.0 h1:e/b4bdlQwC5fnGtG3dlXUrNOnP7c8YLVSpSfEBIkTnI= -go.opentelemetry.io/otel/sdk/log v0.16.0/go.mod h1:JKfP3T6ycy7QEuv3Hj8oKDy7KItrEkus8XJE6EoSzw4= -go.opentelemetry.io/otel/sdk/log/logtest v0.16.0 h1:/XVkpZ41rVRTP4DfMgYv1nEtNmf65XPPyAdqV90TMy4= -go.opentelemetry.io/otel/sdk/log/logtest v0.16.0/go.mod h1:iOOPgQr5MY9oac/F5W86mXdeyWZGleIx3uXO98X2R6Y= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= 
-go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= -go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= -go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= -go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.opentelemetry.io/contrib/bridges/prometheus v0.68.0 h1:w3zlHYETbDwXyWHZlyyR58ZC39XGi8rAhkBgUgJ9d5w= +go.opentelemetry.io/contrib/bridges/prometheus v0.68.0/go.mod h1:GR/mClR2nn7vE8RLwxKjoBNg+QtgdDhRzxVa93koy5o= +go.opentelemetry.io/contrib/exporters/autoexport v0.68.0 h1:0D3GFvELGIwQGfC6agLsbrEYSGWZTRTxIXxcQUqrOuk= +go.opentelemetry.io/contrib/exporters/autoexport v0.68.0/go.mod h1:DM2NV7Zb8CcGeVPt6glouY0FAiwZQ/iqgcWExhgWeN8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.19.0 h1:Dn8rkudDzY6KV9dr/D/bTUuWgqDf9xe0rr4G2elrn0Y= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.19.0/go.mod h1:gMk9F0xDgyN9M/3Ed5Y1wKcx/9mlU91NXY2SNq7RQuU= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 h1:HIBTQ3VO5aupLKjC90JgMqpezVXwFuq6Ryjn0/izoag= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0/go.mod h1:ji9vId85hMxqfvICA0Jt8JqEdrXaAkcpkI9HPXya0ro= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 h1:8UQVDcZxOJLtX6gxtDt3vY2WTgvZqMQRzjsqiIHQdkc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0/go.mod h1:2lmweYCiHYpEjQ/lSJBYhj9jP1zvCvQW4BqL9dnT7FQ= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 h1:RAE+JPfvEmvy+0LzyUA25/SGawPwIUbZ6u0Wug54sLc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0/go.mod h1:AGmbycVGEsRx9mXMZ75CsOyhSP6MFIcj/6dnG+vhVjk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak= +go.opentelemetry.io/otel/exporters/prometheus v0.65.0 h1:jOveH/b4lU9HT7y+Gfamf18BqlOuz2PWEvs8yM7Q6XE= +go.opentelemetry.io/otel/exporters/prometheus v0.65.0/go.mod h1:i1P8pcumauPtUI4YNopea1dhzEMuEqWP1xoUZDylLHo= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0 h1:GJkybS+crDMdExT/BUNCEgfrmfboztcS6PhvSo88HKM= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0/go.mod h1:NuAyxRYIG2lKX3YQkB+83StTxM7s52PUUkRRiC0wnYI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 h1:TC+BewnDpeiAmcscXbGMfxkO+mwYUwE/VySwvw88PfA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0/go.mod h1:J/ZyF4vfPwsSr9xJSPyQ4LqtcTPULFR64KwTikGLe+A= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU= +go.opentelemetry.io/otel/log v0.19.0 h1:KUZs/GOsw79TBBMfDWsXS+KZ4g2Ckzksd1ymzsIEbo4= +go.opentelemetry.io/otel/log 
v0.19.0/go.mod h1:5DQYeGmxVIr4n0/BcJvF4upsraHjg6vudJJpnkL6Ipk= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/log v0.19.0 h1:scYVLqT22D2gqXItnWiocLUKGH9yvkkeql5dBDiXyko= +go.opentelemetry.io/otel/sdk/log v0.19.0/go.mod h1:vFBowwXGLlW9AvpuF7bMgnNI95LiW10szrOdvzBHlAg= +go.opentelemetry.io/otel/sdk/log/logtest v0.19.0 h1:BEbF7ZBB6qQloV/Ub1+3NQoOUnVtcGkU3XX4Ws3GQfk= +go.opentelemetry.io/otel/sdk/log/logtest v0.19.0/go.mod h1:Lua81/3yM0wOmoHTokLj9y9ADeA02v1naRrVrkAZuKk= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= +go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -409,39 +413,39 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 
h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= -golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= -golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0= -golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f h1:W3F4c+6OLc6H2lb//N1q4WpJkhzJCK5J6kUi1NTVXfM= +golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.35.0 
h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM= +golang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= -golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= -golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= -golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.19.0 
h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -451,36 +455,36 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= -golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= -golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= -golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= -golang.org/x/time v0.14.0 
h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= -golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c= +golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= 
-google.golang.org/genproto/googleapis/api v0.0.0-20260226221140-a57be14db171 h1:tu/dtnW1o3wfaxCOjSLn5IRX4YDcJrtlpzYkhHhGaC4= -google.golang.org/genproto/googleapis/api v0.0.0-20260226221140-a57be14db171/go.mod h1:M5krXqk4GhBKvB596udGL3UyjL4I1+cTbK0orROM9ng= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= -google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= -google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 h1:yQugLulqltosq0B/f8l4w9VryjV+N/5gcW0jQ3N8Qec= +google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478/go.mod h1:C6ADNqOxbgdUUeRTU+LCHDPB9ttAMCTff6auwCVa4uc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 h1:RmoJA1ujG+/lRGNfUnOMfhCy5EipVMyvUE+KNbPbTlw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -515,8 +519,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.34.1 
h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU= k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ= -k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY= -k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f h1:4Qiq0YAoQATdgmHALJWz9rJ4fj20pB3xebpB4CFNhYM= +k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0= k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2Fol2CS0QHMNs/WI1MOSGzCm1KhM5ec= diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS index bd0a61158..268129f08 100644 --- a/vendor/github.com/cilium/ebpf/CODEOWNERS +++ b/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -1,11 +1,17 @@ * @cilium/ebpf-lib-maintainers -features/ @rgo3 -link/ @mmat11 +/features/ @rgo3 +/link/ @mmat11 -perf/ @florianl -ringbuf/ @florianl +/perf/ @florianl +/ringbuf/ @florianl -btf/ @dylandreimerink +/btf/ @dylandreimerink -docs/ @ti-mo +/docs/ @ti-mo + +# Windows specific code. +/docs/**/windows*.md @cilium/ebpf-go-windows-reviewers +/internal/efw @cilium/ebpf-go-windows-reviewers +windows/ @cilium/ebpf-go-windows-reviewers # Folders +*windows*.go @cilium/ebpf-go-windows-reviewers # Go code diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile index 4f53b37f3..b108a959c 100644 --- a/vendor/github.com/cilium/ebpf/Makefile +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -11,23 +11,41 @@ CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ # Obtain an absolute path to the directory of the Makefile. 
# Assume the Makefile is in the root of the repository. REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) -UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) # Prefer podman if installed, otherwise use docker. # Note: Setting the var at runtime will always override. -CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) -CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), \ - --log-driver=none \ - -v "$(shell go env GOCACHE)":/root/.cache/go-build \ - -v "$(shell go env GOMODCACHE)":/go/pkg/mod, --user "${UIDGID}") +CONTAINER_ENGINE ?= $(if $(shell command -v podman),podman,docker) + +# Configure container runtime arguments based on the container engine. +CONTAINER_RUN_ARGS := \ + --env MAKEFLAGS \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ + --env HOME=/tmp \ + -v "${REPODIR}":/ebpf -w /ebpf \ + -v "$(shell go env GOCACHE)":/tmp/.cache/go-build \ + -v "$(shell go env GOPATH)":/go \ + -v "$(shell go env GOMODCACHE)":/go/pkg/mod + +ifeq ($(CONTAINER_ENGINE), podman) +CONTAINER_RUN_ARGS += --log-driver=none --security-opt label=disable +else +CONTAINER_RUN_ARGS += --user "$(shell stat -c '%u:%g' ${REPODIR})" +endif IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) +TARGETS_EL := \ + testdata/linked1 \ + testdata/linked2 \ + testdata/linked + TARGETS := \ testdata/loader-clang-14 \ testdata/loader-clang-17 \ testdata/loader-$(CLANG) \ + testdata/loader_nobtf \ testdata/manyprogs \ testdata/btf_map_init \ testdata/invalid_map \ @@ -50,6 +68,7 @@ TARGETS := \ testdata/errors \ testdata/variables \ testdata/arena \ + testdata/struct_ops \ btf/testdata/relocs \ btf/testdata/relocs_read \ btf/testdata/relocs_read_tgt \ @@ -57,6 +76,8 @@ TARGETS := \ btf/testdata/tags \ cmd/bpf2go/testdata/minimal +HEADERS := $(wildcard testdata/*.h) + .PHONY: all clean container-all container-shell generate .DEFAULT_TARGET = container-all @@ -64,20 +85,13 @@ 
TARGETS := \ # Build all ELF binaries using a containerized LLVM toolchain. container-all: +${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ - -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ - --env HOME="/tmp" \ - --env BPF2GO_CC="$(CLANG)" \ - --env BPF2GO_CFLAGS="$(CFLAGS)" \ "${IMAGE}:${VERSION}" \ - make all + $(MAKE) all # (debug) Drop the user into a shell inside the container as root. # Set BPF2GO_ envs to make 'make generate' just work. container-shell: ${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ - -v "${REPODIR}":/ebpf -w /ebpf \ - --env BPF2GO_CC="$(CLANG)" \ - --env BPF2GO_CFLAGS="$(CFLAGS)" \ "${IMAGE}:${VERSION}" clean: @@ -87,32 +101,44 @@ clean: format: find . -type f -name "*.c" | xargs clang-format -i -all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate +all: format testdata update-external-deps ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf + $(MAKE) generate generate: + go generate -run "stringer" ./... go generate -run "gentypes" ./... - go generate -skip "gentypes" ./... + go generate -skip "(gentypes|stringer)" ./... 
+ +testdata: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) $(addsuffix -el.elf,$(TARGETS_EL)) -testdata/loader-%-el.elf: testdata/loader.c +testdata/loader-%-el.elf: testdata/loader.c $(HEADERS) $* $(CFLAGS) -target bpfel -c $< -o $@ $(STRIP) -g $@ -testdata/loader-%-eb.elf: testdata/loader.c +testdata/loader-%-eb.elf: testdata/loader.c $(HEADERS) $* $(CFLAGS) -target bpfeb -c $< -o $@ $(STRIP) -g $@ -%-el.elf: %.c +testdata/loader_nobtf-el.elf: testdata/loader.c $(HEADERS) + $(CLANG) $(CFLAGS) -g0 -D__NOBTF__ -target bpfel -c $< -o $@ + +testdata/loader_nobtf-eb.elf: testdata/loader.c $(HEADERS) + $(CLANG) $(CFLAGS) -g0 -D__NOBTF__ -target bpfeb -c $< -o $@ + +%-el.elf: %.c $(HEADERS) $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ $(STRIP) -g $@ -%-eb.elf : %.c +%-eb.elf: %.c $(HEADERS) $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ $(STRIP) -g $@ -.PHONY: update-kernel-deps -update-kernel-deps: export KERNEL_VERSION?=6.8 -update-kernel-deps: - ./testdata/sh/update-kernel-deps.sh - $(MAKE) container-all +testdata/linked-el.elf: testdata/linked1-el.elf testdata/linked2-el.elf + bpftool gen object $@ $^ + +.PHONY: update-external-deps +update-external-deps: + ./scripts/update-kernel-deps.sh + ./scripts/update-efw-deps.sh diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go index d5d624f09..259837974 100644 --- a/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -241,33 +241,36 @@ func _() { _ = x[WindowsFnGetCurrentPidTgid-268435475] _ = x[WindowsFnGetCurrentLogonId-268435476] _ = x[WindowsFnIsCurrentAdmin-268435477] - _ = x[WindowsFnMemcpy-268435478] - _ = x[WindowsFnMemcmp-268435479] + _ = x[WindowsFnMemcpyS-268435478] + _ = x[WindowsFnMemcmpS-268435479] _ = x[WindowsFnMemset-268435480] - _ = x[WindowsFnMemmove-268435481] + _ = x[WindowsFnMemmoveS-268435481] _ = x[WindowsFnGetSocketCookie-268435482] _ = x[WindowsFnStrncpyS-268435483] _ 
= x[WindowsFnStrncatS-268435484] _ = x[WindowsFnStrnlenS-268435485] _ = x[WindowsFnKtimeGetBootMs-268435486] _ = x[WindowsFnKtimeGetMs-268435487] + _ = x[WindowsFnPerfEventOutput-268435488] + _ = x[WindowsFnGetCurrentProcessStartKey-268435489] + _ = x[WindowsFnGetCurrentThreadCreateTime-268435490] } const ( _BuiltinFunc_name_0 = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeRea
dKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDelete" - _BuiltinFunc_name_1 = 
"WindowsFnMapLookupElemWindowsFnMapUpdateElemWindowsFnMapDeleteElemWindowsFnMapLookupAndDeleteElemWindowsFnTailCallWindowsFnGetPrandomU32WindowsFnKtimeGetBootNsWindowsFnGetSmpProcessorIdWindowsFnKtimeGetNsWindowsFnCsumDiffWindowsFnRingbufOutputWindowsFnTracePrintk2WindowsFnTracePrintk3WindowsFnTracePrintk4WindowsFnTracePrintk5WindowsFnMapPushElemWindowsFnMapPopElemWindowsFnMapPeekElemWindowsFnGetCurrentPidTgidWindowsFnGetCurrentLogonIdWindowsFnIsCurrentAdminWindowsFnMemcpyWindowsFnMemcmpWindowsFnMemsetWindowsFnMemmoveWindowsFnGetSocketCookieWindowsFnStrncpySWindowsFnStrncatSWindowsFnStrnlenSWindowsFnKtimeGetBootMsWindowsFnKtimeGetMs" + _BuiltinFunc_name_1 = "WindowsFnMapLookupElemWindowsFnMapUpdateElemWindowsFnMapDeleteElemWindowsFnMapLookupAndDeleteElemWindowsFnTailCallWindowsFnGetPrandomU32WindowsFnKtimeGetBootNsWindowsFnGetSmpProcessorIdWindowsFnKtimeGetNsWindowsFnCsumDiffWindowsFnRingbufOutputWindowsFnTracePrintk2WindowsFnTracePrintk3WindowsFnTracePrintk4WindowsFnTracePrintk5WindowsFnMapPushElemWindowsFnMapPopElemWindowsFnMapPeekElemWindowsFnGetCurrentPidTgidWindowsFnGetCurrentLogonIdWindowsFnIsCurrentAdminWindowsFnMemcpySWindowsFnMemcmpSWindowsFnMemsetWindowsFnMemmoveSWindowsFnGetSocketCookieWindowsFnStrncpySWindowsFnStrncatSWindowsFnStrnlenSWindowsFnKtimeGetBootMsWindowsFnKtimeGetMsWindowsFnPerfEventOutputWindowsFnGetCurrentProcessStartKeyWindowsFnGetCurrentThreadCreateTime" ) var ( _BuiltinFunc_index_0 = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 
1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165} - _BuiltinFunc_index_1 = [...]uint16{0, 22, 44, 66, 97, 114, 136, 159, 185, 204, 221, 243, 264, 285, 306, 327, 347, 366, 386, 412, 438, 461, 476, 491, 506, 522, 546, 563, 580, 597, 620, 639} + _BuiltinFunc_index_1 = [...]uint16{0, 22, 44, 66, 97, 114, 136, 159, 185, 204, 221, 243, 264, 285, 306, 327, 347, 366, 386, 412, 438, 461, 477, 493, 508, 525, 549, 566, 583, 600, 623, 642, 666, 700, 735} ) func (i BuiltinFunc) String() string { switch { case i <= 211: return _BuiltinFunc_name_0[_BuiltinFunc_index_0[i]:_BuiltinFunc_index_0[i+1]] - case 268435457 <= i && i <= 268435487: + case 268435457 <= i && i <= 268435490: i -= 268435457 return _BuiltinFunc_name_1[_BuiltinFunc_index_1[i]:_BuiltinFunc_index_1[i+1]] default: diff --git a/vendor/github.com/cilium/ebpf/asm/func_win.go b/vendor/github.com/cilium/ebpf/asm/func_win.go index b016f0086..609ed9056 100644 --- a/vendor/github.com/cilium/ebpf/asm/func_win.go +++ b/vendor/github.com/cilium/ebpf/asm/func_win.go @@ -1,44 +1,45 @@ -// Code generated by internal/cmd/genwinfunctions.awk; DO NOT EDIT. +// Code generated by internal/cmd/genwinfunctions.awk; DO NOT EDIT. package asm // Code in this file is derived from eBPF for Windows, available under the MIT License. 
-import ( - "github.com/cilium/ebpf/internal/platform" -) +import "github.com/cilium/ebpf/internal/platform" // Built-in functions (Windows). const ( - WindowsFnMapLookupElem = BuiltinFunc(platform.WindowsTag | 1) - WindowsFnMapUpdateElem = BuiltinFunc(platform.WindowsTag | 2) - WindowsFnMapDeleteElem = BuiltinFunc(platform.WindowsTag | 3) - WindowsFnMapLookupAndDeleteElem = BuiltinFunc(platform.WindowsTag | 4) - WindowsFnTailCall = BuiltinFunc(platform.WindowsTag | 5) - WindowsFnGetPrandomU32 = BuiltinFunc(platform.WindowsTag | 6) - WindowsFnKtimeGetBootNs = BuiltinFunc(platform.WindowsTag | 7) - WindowsFnGetSmpProcessorId = BuiltinFunc(platform.WindowsTag | 8) - WindowsFnKtimeGetNs = BuiltinFunc(platform.WindowsTag | 9) - WindowsFnCsumDiff = BuiltinFunc(platform.WindowsTag | 10) - WindowsFnRingbufOutput = BuiltinFunc(platform.WindowsTag | 11) - WindowsFnTracePrintk2 = BuiltinFunc(platform.WindowsTag | 12) - WindowsFnTracePrintk3 = BuiltinFunc(platform.WindowsTag | 13) - WindowsFnTracePrintk4 = BuiltinFunc(platform.WindowsTag | 14) - WindowsFnTracePrintk5 = BuiltinFunc(platform.WindowsTag | 15) - WindowsFnMapPushElem = BuiltinFunc(platform.WindowsTag | 16) - WindowsFnMapPopElem = BuiltinFunc(platform.WindowsTag | 17) - WindowsFnMapPeekElem = BuiltinFunc(platform.WindowsTag | 18) - WindowsFnGetCurrentPidTgid = BuiltinFunc(platform.WindowsTag | 19) - WindowsFnGetCurrentLogonId = BuiltinFunc(platform.WindowsTag | 20) - WindowsFnIsCurrentAdmin = BuiltinFunc(platform.WindowsTag | 21) - WindowsFnMemcpy = BuiltinFunc(platform.WindowsTag | 22) - WindowsFnMemcmp = BuiltinFunc(platform.WindowsTag | 23) - WindowsFnMemset = BuiltinFunc(platform.WindowsTag | 24) - WindowsFnMemmove = BuiltinFunc(platform.WindowsTag | 25) - WindowsFnGetSocketCookie = BuiltinFunc(platform.WindowsTag | 26) - WindowsFnStrncpyS = BuiltinFunc(platform.WindowsTag | 27) - WindowsFnStrncatS = BuiltinFunc(platform.WindowsTag | 28) - WindowsFnStrnlenS = BuiltinFunc(platform.WindowsTag | 29) - 
WindowsFnKtimeGetBootMs = BuiltinFunc(platform.WindowsTag | 30) - WindowsFnKtimeGetMs = BuiltinFunc(platform.WindowsTag | 31) + WindowsFnMapLookupElem = BuiltinFunc(platform.WindowsTag | 1) + WindowsFnMapUpdateElem = BuiltinFunc(platform.WindowsTag | 2) + WindowsFnMapDeleteElem = BuiltinFunc(platform.WindowsTag | 3) + WindowsFnMapLookupAndDeleteElem = BuiltinFunc(platform.WindowsTag | 4) + WindowsFnTailCall = BuiltinFunc(platform.WindowsTag | 5) + WindowsFnGetPrandomU32 = BuiltinFunc(platform.WindowsTag | 6) + WindowsFnKtimeGetBootNs = BuiltinFunc(platform.WindowsTag | 7) + WindowsFnGetSmpProcessorId = BuiltinFunc(platform.WindowsTag | 8) + WindowsFnKtimeGetNs = BuiltinFunc(platform.WindowsTag | 9) + WindowsFnCsumDiff = BuiltinFunc(platform.WindowsTag | 10) + WindowsFnRingbufOutput = BuiltinFunc(platform.WindowsTag | 11) + WindowsFnTracePrintk2 = BuiltinFunc(platform.WindowsTag | 12) + WindowsFnTracePrintk3 = BuiltinFunc(platform.WindowsTag | 13) + WindowsFnTracePrintk4 = BuiltinFunc(platform.WindowsTag | 14) + WindowsFnTracePrintk5 = BuiltinFunc(platform.WindowsTag | 15) + WindowsFnMapPushElem = BuiltinFunc(platform.WindowsTag | 16) + WindowsFnMapPopElem = BuiltinFunc(platform.WindowsTag | 17) + WindowsFnMapPeekElem = BuiltinFunc(platform.WindowsTag | 18) + WindowsFnGetCurrentPidTgid = BuiltinFunc(platform.WindowsTag | 19) + WindowsFnGetCurrentLogonId = BuiltinFunc(platform.WindowsTag | 20) + WindowsFnIsCurrentAdmin = BuiltinFunc(platform.WindowsTag | 21) + WindowsFnMemcpyS = BuiltinFunc(platform.WindowsTag | 22) + WindowsFnMemcmpS = BuiltinFunc(platform.WindowsTag | 23) + WindowsFnMemset = BuiltinFunc(platform.WindowsTag | 24) + WindowsFnMemmoveS = BuiltinFunc(platform.WindowsTag | 25) + WindowsFnGetSocketCookie = BuiltinFunc(platform.WindowsTag | 26) + WindowsFnStrncpyS = BuiltinFunc(platform.WindowsTag | 27) + WindowsFnStrncatS = BuiltinFunc(platform.WindowsTag | 28) + WindowsFnStrnlenS = BuiltinFunc(platform.WindowsTag | 29) + WindowsFnKtimeGetBootMs = 
BuiltinFunc(platform.WindowsTag | 30) + WindowsFnKtimeGetMs = BuiltinFunc(platform.WindowsTag | 31) + WindowsFnPerfEventOutput = BuiltinFunc(platform.WindowsTag | 32) + WindowsFnGetCurrentProcessStartKey = BuiltinFunc(platform.WindowsTag | 33) + WindowsFnGetCurrentThreadCreateTime = BuiltinFunc(platform.WindowsTag | 34) ) diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go index b2ce72ca8..627d403d8 100644 --- a/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -2,10 +2,12 @@ package asm import ( "crypto/sha1" + "crypto/sha256" "encoding/binary" "encoding/hex" "errors" "fmt" + "hash" "io" "math" "sort" @@ -43,6 +45,14 @@ type Instruction struct { Metadata Metadata } +// Width returns how many raw BPF instructions the Instruction occupies within +// an instruction stream. For example, an Instruction encoding a 64-bit value +// will typically occupy 2 raw instructions, while a 32-bit constant can be +// encoded in a single raw instruction. +func (ins *Instruction) Width() RawInstructionOffset { + return RawInstructionOffset(ins.OpCode.rawInstructions()) +} + // Unmarshal decodes a BPF instruction. func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder, platform string) error { data := make([]byte, InstructionSize) @@ -233,22 +243,6 @@ func (ins *Instruction) AssociateMap(m FDer) error { return nil } -// RewriteMapPtr changes an instruction to use a new map fd. -// -// Returns an error if the instruction doesn't load a map. -// -// Deprecated: use AssociateMap instead. If you cannot provide a Map, -// wrap an fd in a type implementing FDer. -func (ins *Instruction) RewriteMapPtr(fd int) error { - if !ins.IsLoadFromMap() { - return errors.New("not a load from a map") - } - - ins.encodeMapFD(fd) - - return nil -} - func (ins *Instruction) encodeMapFD(fd int) { // Preserve the offset value for direct map loads. 
offset := uint64(ins.Constant) & (math.MaxUint32 << 32) @@ -256,22 +250,6 @@ func (ins *Instruction) encodeMapFD(fd int) { ins.Constant = int64(offset | rawFd) } -// MapPtr returns the map fd for this instruction. -// -// The result is undefined if the instruction is not a load from a map, -// see IsLoadFromMap. -// -// Deprecated: use Map() instead. -func (ins *Instruction) MapPtr() int { - // If there is a map associated with the instruction, return its FD. - if fd := ins.Metadata.Get(mapMeta{}); fd != nil { - return fd.(FDer).FD() - } - - // Fall back to the fd stored in the Constant field - return ins.mapFd() -} - // mapFd returns the map file descriptor stored in the 32 least significant // bits of ins' Constant field. func (ins *Instruction) mapFd() int { @@ -486,13 +464,6 @@ func (ins Instruction) WithSymbol(name string) Instruction { return ins } -// Sym creates a symbol. -// -// Deprecated: use WithSymbol instead. -func (ins Instruction) Sym(name string) Instruction { - return ins.WithSymbol(name) -} - // Symbol returns the value ins has been marked with using WithSymbol, // otherwise returns an empty string. A symbol is often an Instruction // at the start of a function body. @@ -632,39 +603,6 @@ func (insns Instructions) AssociateMap(symbol string, m FDer) error { return nil } -// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. -// -// Returns ErrUnreferencedSymbol if the symbol isn't used. -// -// Deprecated: use AssociateMap instead. 
-func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { - if symbol == "" { - return errors.New("empty symbol") - } - - var found bool - for i := range insns { - ins := &insns[i] - if ins.Reference() != symbol { - continue - } - - if !ins.IsLoadFromMap() { - return errors.New("not a load from a map") - } - - ins.encodeMapFD(fd) - - found = true - } - - if !found { - return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) - } - - return nil -} - // SymbolOffsets returns the set of symbols and their offset in // the instructions. func (insns Instructions) SymbolOffsets() (map[string]int, error) { @@ -817,18 +755,72 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { // It mirrors bpf_prog_calc_tag in the kernel and so can be compared // to ProgramInfo.Tag to figure out whether a loaded program matches // certain instructions. +// +// Deprecated: The value produced by this method no longer matches tags produced +// by the kernel since Linux 6.18. Use [Instructions.HasTag] instead. func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { + // We cannot determine which hashing function to use without probing the kernel. + // So use the legacy SHA-1 implementation and deprecate this method. + return insns.tagSha1(bo) +} + +// HasTag returns true if the given tag matches the kernel tag of insns. +func (insns Instructions) HasTag(tag string, bo binary.ByteOrder) (bool, error) { + sha256Tag, err := insns.tagSha256(bo) + if err != nil { + return false, fmt.Errorf("hashing sha256: %w", err) + } + if tag == sha256Tag { + return true, nil + } + + sha1Tag, err := insns.tagSha1(bo) + if err != nil { + return false, fmt.Errorf("hashing sha1: %w", err) + } + + return tag == sha1Tag, nil +} + +// tagSha1 calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in kernels up to v6.18 and can be compared to +// ProgramInfo.Tag to figure out whether a loaded Program matches insns. 
+func (insns Instructions) tagSha1(bo binary.ByteOrder) (string, error) { h := sha1.New() + if err := insns.hash(h, bo); err != nil { + return "", err + } + return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil +} + +// tagSha256 calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in the kernel and can be compared to +// ProgramInfo.Tag to figure out whether a loaded Program matches insns. +func (insns Instructions) tagSha256(bo binary.ByteOrder) (string, error) { + h := sha256.New() + if err := insns.hash(h, bo); err != nil { + return "", err + } + return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil +} + +// hash calculates the hash of the instruction stream. Map load instructions +// are zeroed out, since these contain map file descriptors or pointers to +// maps, which will be different from load to load and would make the hash +// non-deterministic. +func (insns Instructions) hash(h hash.Hash, bo binary.ByteOrder) error { for i, ins := range insns { if ins.IsLoadFromMap() { ins.Constant = 0 } _, err := ins.Marshal(h, bo) if err != nil { - return "", fmt.Errorf("instruction %d: %w", i, err) + return fmt.Errorf("instruction %d: %w", i, err) } } - return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil + return nil } // encodeFunctionReferences populates the Offset (or Constant, depending on @@ -968,11 +960,3 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) } } - -// IsUnreferencedSymbol returns true if err was caused by -// an unreferenced symbol. -// -// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). 
-func IsUnreferencedSymbol(err error) bool { - return errors.Is(err, ErrUnreferencedSymbol) -} diff --git a/vendor/github.com/cilium/ebpf/btf/dedup.go b/vendor/github.com/cilium/ebpf/btf/dedup.go new file mode 100644 index 000000000..783ea09e8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/dedup.go @@ -0,0 +1,633 @@ +package btf + +import ( + "errors" + "fmt" + "hash/maphash" + "slices" +) + +// deduper deduplicates BTF types by finding all types in a Type graph that are +// Equivalent and replaces them with a single instance. +// +// See doc comments in types.go to understand the various ways in which Types +// can relate to each other and how they are compared for equality. We separate +// Identity (same memory location), Equivalence (same shape/layout), and +// Compatibility (CO-RE compatible) to be explicit about intent. +// +// This deduper opportunistically uses a combination of Identity and Equivalence +// to find types that can be deduplicated. +type deduper struct { + visited map[Type]struct{} + hashCache map[hashCacheKey]uint64 + + // Set of types that have been deduplicated. + done map[Type]Type + + // Map of hash to types with that hash. + hashed map[uint64][]Type + eqCache map[typKey]bool + + seed maphash.Seed +} + +func newDeduper() *deduper { + return &deduper{ + make(map[Type]struct{}), + make(map[hashCacheKey]uint64), + make(map[Type]Type), + make(map[uint64][]Type), + make(map[typKey]bool), + maphash.MakeSeed(), + } +} + +func (d *deduper) deduplicate(t Type) (Type, error) { + // If we have already attempted to deduplicate this exact type, return the + // result. + if done, ok := d.done[t]; ok { + return done, nil + } + + // Visit the subtree, if a type has children, attempt to replace it with a + // deduplicated version of those children. 
+ for t := range postorder(t, d.visited) { + for c := range children(t) { + var err error + *c, err = d.hashInsert(*c) + if err != nil { + return nil, err + } + } + } + + // Finally, deduplicate the root type itself. + return d.hashInsert(t) +} + +// hashInsert attempts to deduplicate t by hashing it and comparing against +// other types with the same hash. Returns the Type to be used as the common +// substitute at this position in the graph. +func (d *deduper) hashInsert(t Type) (Type, error) { + // If we have deduplicated this type before, return the result of that + // deduplication. + if done, ok := d.done[t]; ok { + return done, nil + } + + // Compute the hash of this type. Types with the same hash are candidates for + // deduplication. + hash, err := d.hash(t, -1) + if err != nil { + return nil, err + } + + // A hash collision is possible, so we need to compare against all candidates + // with the same hash. + for _, candidate := range d.hashed[hash] { + // Pre-size the visited slice, experimentation on VMLinux shows a capacity + // of 16 to give the best performance. + const visitedCapacity = 16 + err := d.typesEquivalent(candidate, t, make([]Type, 0, visitedCapacity)) + if errors.Is(err, errNotEquivalent) { + continue + } + if err != nil { + return nil, err + } + + // Found a Type that's both Equivalent and hashes to the same value, choose + // it as the deduplicated version. + d.done[t] = candidate + + return candidate, nil + } + + d.hashed[hash] = append(d.hashed[hash], t) + + return t, nil +} + +// The hash of a Type is the same given its pointer and depth budget. +type hashCacheKey struct { + t Type + depthBudget int +} + +// hash computes a hash for t. The produced hash is the same for Types which +// are similar. The hash can collide such that two different Types may produce +// the same hash, so equivalence must be checked explicitly. It will recurse +// into children. The initial call should use a depthBudget of -1. 
+func (d *deduper) hash(t Type, depthBudget int) (uint64, error) { + if depthBudget == 0 { + return 0, nil + } + + h := &maphash.Hash{} + h.SetSeed(d.seed) + + switch t := t.(type) { + case *Void: + maphash.WriteComparable(h, kindUnknown) + + case *Int: + maphash.WriteComparable(h, kindInt) + maphash.WriteComparable(h, *t) + + case *Pointer: + maphash.WriteComparable(h, kindPointer) + // If the depth budget is positive, decrement it every time we follow a + // pointer. + if depthBudget > 0 { + depthBudget-- + } + + // If this is the first time we are following a pointer, set the depth + // budget. This limits amount of recursion we do when hashing pointers that + // form cycles. This is cheaper than tracking visited types and works + // because hash collisions are allowed. + if depthBudget < 0 { + depthBudget = 1 + + // Double pointers are common in C. However, with a depth budget of 1, all + // double pointers would hash the same, causing a performance issue when + // checking equivalence. So we give double pointers a bit more budget. + if _, ok := t.Target.(*Pointer); ok { + depthBudget = 2 + } + } + sub, err := d.hash(t.Target, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Array: + maphash.WriteComparable(h, kindArray) + maphash.WriteComparable(h, t.Nelems) + sub, err := d.hash(t.Index, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + _, err = d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Struct, *Union: + // Check the cache to avoid recomputing the hash for this type and depth + // budget. 
+ key := hashCacheKey{t, depthBudget} + if cached, ok := d.hashCache[key]; ok { + return cached, nil + } + + var members []Member + switch t := t.(type) { + case *Struct: + maphash.WriteComparable(h, kindStruct) + maphash.WriteComparable(h, t.Name) + maphash.WriteComparable(h, t.Size) + members = t.Members + + case *Union: + maphash.WriteComparable(h, kindUnion) + maphash.WriteComparable(h, t.Name) + maphash.WriteComparable(h, t.Size) + members = t.Members + } + + maphash.WriteComparable(h, len(members)) + for _, m := range members { + maphash.WriteComparable(h, m.Name) + maphash.WriteComparable(h, m.Offset) + sub, err := d.hash(m.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + } + + sum := h.Sum64() + d.hashCache[key] = sum + return sum, nil + + case *Enum: + maphash.WriteComparable(h, kindEnum) + maphash.WriteComparable(h, t.Name) + maphash.WriteComparable(h, t.Size) + maphash.WriteComparable(h, t.Signed) + for _, v := range t.Values { + maphash.WriteComparable(h, v) + } + + case *Fwd: + maphash.WriteComparable(h, kindForward) + maphash.WriteComparable(h, *t) + + case *Typedef: + maphash.WriteComparable(h, kindTypedef) + maphash.WriteComparable(h, t.Name) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Volatile: + maphash.WriteComparable(h, kindVolatile) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Const: + maphash.WriteComparable(h, kindConst) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Restrict: + maphash.WriteComparable(h, kindRestrict) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Func: + maphash.WriteComparable(h, kindFunc) + maphash.WriteComparable(h, t.Name) + sub, err := d.hash(t.Type, depthBudget) + if 
err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *FuncProto: + // It turns out that pointers to function prototypes are common in C code, + // function pointers. Function prototypes frequently have similar patterns + // of [ptr, ptr] -> int, or [ptr, ptr, ptr] -> int. Causing frequent hash + // collisions, for the default depth budget of 1. So allow one additional + // level of pointers when we encounter a function prototype. + if depthBudget >= 0 { + depthBudget++ + } + + maphash.WriteComparable(h, kindFuncProto) + for _, p := range t.Params { + maphash.WriteComparable(h, p.Name) + sub, err := d.hash(p.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + } + sub, err := d.hash(t.Return, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Var: + maphash.WriteComparable(h, kindVar) + maphash.WriteComparable(h, t.Name) + maphash.WriteComparable(h, t.Linkage) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Datasec: + maphash.WriteComparable(h, kindDatasec) + maphash.WriteComparable(h, t.Name) + for _, v := range t.Vars { + maphash.WriteComparable(h, v.Offset) + maphash.WriteComparable(h, v.Size) + sub, err := d.hash(v.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + } + + case *declTag: + maphash.WriteComparable(h, kindDeclTag) + maphash.WriteComparable(h, t.Value) + maphash.WriteComparable(h, t.Index) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *TypeTag: + maphash.WriteComparable(h, kindTypeTag) + maphash.WriteComparable(h, t.Value) + sub, err := d.hash(t.Type, depthBudget) + if err != nil { + return 0, err + } + maphash.WriteComparable(h, sub) + + case *Float: + maphash.WriteComparable(h, kindFloat) + maphash.WriteComparable(h, *t) + + default: + return 0, 
fmt.Errorf("unsupported type for hashing: %T", t) + } + + return h.Sum64(), nil +} + +type typKey struct { + a Type + b Type +} + +var errNotEquivalent = errors.New("types are not equivalent") + +// typesEquivalent checks if two types are Equivalent. +func (d *deduper) typesEquivalent(ta, tb Type, visited []Type) error { + // Fast path: if Types are Identical, they are also Equivalent. + if ta == tb { + return nil + } + + switch a := ta.(type) { + case *Void: + if _, ok := tb.(*Void); ok { + return nil + } + return errNotEquivalent + + case *Int: + b, ok := tb.(*Int) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name || a.Size != b.Size || a.Encoding != b.Encoding { + return errNotEquivalent + } + return nil + + case *Enum: + b, ok := tb.(*Enum) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name || len(a.Values) != len(b.Values) { + return errNotEquivalent + } + for i := range a.Values { + if a.Values[i].Name != b.Values[i].Name || a.Values[i].Value != b.Values[i].Value { + return errNotEquivalent + } + } + return nil + + case *Fwd: + b, ok := tb.(*Fwd) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name || a.Kind != b.Kind { + return errNotEquivalent + } + return nil + + case *Float: + b, ok := tb.(*Float) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name || a.Size != b.Size { + return errNotEquivalent + } + return nil + + case *Array: + b, ok := tb.(*Array) + if !ok { + return errNotEquivalent + } + + if a.Nelems != b.Nelems { + return errNotEquivalent + } + if err := d.typesEquivalent(a.Index, b.Index, visited); err != nil { + return err + } + if err := d.typesEquivalent(a.Type, b.Type, visited); err != nil { + return err + } + return nil + + case *Pointer: + b, ok := tb.(*Pointer) + if !ok { + return errNotEquivalent + } + + // Detect cycles by tracking visited types. Assume types are Equivalent if + // we have already visited this type in the current Equivalence check. 
+ if slices.Contains(visited, ta) { + return nil + } + visited = append(visited, ta) + + return d.typesEquivalent(a.Target, b.Target, visited) + + case *Struct, *Union: + // Use a cache to avoid recomputation. We only do this for composite types + // since they are where types fan out the most. For other types, the + // overhead of the lookup and update outweighs performance benefits. + cacheKey := typKey{a: ta, b: tb} + if equal, ok := d.eqCache[cacheKey]; ok { + if equal { + return nil + } + return errNotEquivalent + } + + compErr := d.compositeEquivalent(ta, tb, visited) + d.eqCache[cacheKey] = compErr == nil + + return compErr + + case *Typedef: + b, ok := tb.(*Typedef) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name { + return errNotEquivalent + } + return d.typesEquivalent(a.Type, b.Type, visited) + + case *Volatile: + b, ok := tb.(*Volatile) + if !ok { + return errNotEquivalent + } + return d.typesEquivalent(a.Type, b.Type, visited) + + case *Const: + b, ok := tb.(*Const) + if !ok { + return errNotEquivalent + } + return d.typesEquivalent(a.Type, b.Type, visited) + + case *Restrict: + b, ok := tb.(*Restrict) + if !ok { + return errNotEquivalent + } + return d.typesEquivalent(a.Type, b.Type, visited) + + case *Func: + b, ok := tb.(*Func) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name { + return errNotEquivalent + } + return d.typesEquivalent(a.Type, b.Type, visited) + + case *FuncProto: + b, ok := tb.(*FuncProto) + if !ok { + return errNotEquivalent + } + + if err := d.typesEquivalent(a.Return, b.Return, visited); err != nil { + return err + } + if len(a.Params) != len(b.Params) { + return errNotEquivalent + } + for i := range a.Params { + if a.Params[i].Name != b.Params[i].Name { + return errNotEquivalent + } + if err := d.typesEquivalent(a.Params[i].Type, b.Params[i].Type, visited); err != nil { + return err + } + } + return nil + + case *Var: + b, ok := tb.(*Var) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name 
{ + return errNotEquivalent + } + if err := d.typesEquivalent(a.Type, b.Type, visited); err != nil { + return err + } + if a.Linkage != b.Linkage { + return errNotEquivalent + } + return nil + + case *Datasec: + b, ok := tb.(*Datasec) + if !ok { + return errNotEquivalent + } + if a.Name != b.Name || len(a.Vars) != len(b.Vars) { + return errNotEquivalent + } + for i := range a.Vars { + if a.Vars[i].Offset != b.Vars[i].Offset || + a.Vars[i].Size != b.Vars[i].Size { + return errNotEquivalent + } + + if err := d.typesEquivalent(a.Vars[i].Type, b.Vars[i].Type, visited); err != nil { + return err + } + } + return nil + + case *declTag: + b, ok := tb.(*declTag) + if !ok { + return errNotEquivalent + } + if a.Value != b.Value || a.Index != b.Index { + return errNotEquivalent + } + return d.typesEquivalent(a.Type, b.Type, visited) + + case *TypeTag: + b, ok := tb.(*TypeTag) + if !ok { + return errNotEquivalent + } + if a.Value != b.Value { + return errNotEquivalent + } + if err := d.typesEquivalent(a.Type, b.Type, visited); err != nil { + return err + } + return nil + + default: + return fmt.Errorf("unsupported type for equivalence: %T", a) + } +} + +// compositeEquivalent checks if two composite types (Struct or Union) are +// Equivalent. 
+func (d *deduper) compositeEquivalent(at, bt Type, visited []Type) error { + var ma, mb []Member + switch a := at.(type) { + case *Struct: + b, ok := bt.(*Struct) + if !ok { + return errNotEquivalent + } + + if a.Name != b.Name || a.Size != b.Size || len(a.Members) != len(b.Members) { + return errNotEquivalent + } + ma = a.Members + mb = b.Members + + case *Union: + b, ok := bt.(*Union) + if !ok { + return errNotEquivalent + } + + if a.Name != b.Name || a.Size != b.Size || len(a.Members) != len(b.Members) { + return errNotEquivalent + } + ma = a.Members + mb = b.Members + } + + for i := range ma { + if ma[i].Name != mb[i].Name || ma[i].Offset != mb[i].Offset { + return errNotEquivalent + } + + if err := d.typesEquivalent(ma[i].Type, mb[i].Type, visited); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go index 6ff5e2b90..2054c1d76 100644 --- a/vendor/github.com/cilium/ebpf/btf/ext_info.go +++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -13,12 +13,23 @@ import ( "github.com/cilium/ebpf/internal" ) -// ExtInfos contains ELF section metadata. +// ExtInfos contains raw, per-section extended BTF metadata from the .BTF.ext +// ELF section. type ExtInfos struct { - // The slices are sorted by offset in ascending order. - funcInfos map[string]FuncOffsets - lineInfos map[string]LineOffsets - relocationInfos map[string]CORERelocationInfos + Funcs map[string]FuncOffsets + Lines map[string]LineOffsets + CORERelos map[string]CORERelocationOffsets +} + +// Section returns the FuncOffsets, LineOffsets and CORERelocationOffsets for +// the given section name. Returns all nils if ExtInfos is nil, or individual +// nils if there is no metadata of that type for the section. 
+func (ei *ExtInfos) Section(name string) (FuncOffsets, LineOffsets, CORERelocationOffsets) { + if ei == nil { + return nil, nil, nil + } + + return ei.Funcs[name], ei.Lines[name], ei.CORERelos[name] } // loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. @@ -91,7 +102,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, er return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) } - coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos)) + coreRelos := make(map[string]CORERelocationOffsets, len(btfCORERelos)) for section, brs := range btfCORERelos { coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings) if err != nil { @@ -102,46 +113,6 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, er return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil } -type ( - funcInfoMeta struct{} - coreRelocationMeta struct{} -) - -// Assign per-section metadata from BTF to a section's instructions. -func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { - funcInfos := ei.funcInfos[section] - lineInfos := ei.lineInfos[section] - reloInfos := ei.relocationInfos[section] - - AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos) -} - -// Assign per-instruction metadata to the instructions in insns. 
-func AssignMetadataToInstructions( - insns asm.Instructions, - funcInfos FuncOffsets, - lineInfos LineOffsets, - reloInfos CORERelocationInfos, -) { - iter := insns.Iterate() - for iter.Next() { - if len(funcInfos) > 0 && funcInfos[0].Offset == iter.Offset { - *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].Func) - funcInfos = funcInfos[1:] - } - - if len(lineInfos) > 0 && lineInfos[0].Offset == iter.Offset { - *iter.Ins = iter.Ins.WithSource(lineInfos[0].Line) - lineInfos = lineInfos[1:] - } - - if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { - iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo) - reloInfos.infos = reloInfos.infos[1:] - } - } -} - // MarshalExtInfos encodes function and line info embedded in insns into kernel // wire format. // @@ -335,8 +306,8 @@ func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { return recordSize, nil } -// FuncOffsets is a sorted slice of FuncOffset. -type FuncOffsets []FuncOffset +// FuncOffsets is a slice of FuncOffsets sorted by offset. +type FuncOffsets = []FuncOffset // The size of a FuncInfo in BTF wire format. var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) @@ -518,8 +489,8 @@ func (li *Line) String() string { return li.line } -// LineOffsets contains a sorted list of line infos. -type LineOffsets []LineOffset +// LineOffsets is a slice of LineOffsets sorted by offset. +type LineOffsets = []LineOffset // LineOffset represents a line info and its raw instruction offset. type LineOffset struct { @@ -713,22 +684,32 @@ func (cr *CORERelocation) String() string { return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id) } +type coreRelocationMeta struct{} + +// CORERelocationMetadata returns the CORERelocation associated with ins. 
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) return relo } -// CORERelocationInfos contains a sorted list of co:re relocation infos. -type CORERelocationInfos struct { - infos []coreRelocationInfo +// WithCORERelocationMetadata associates a CORERelocation with ins and returns +// the modified Instruction. +func WithCORERelocationMetadata(ins asm.Instruction, relo *CORERelocation) asm.Instruction { + ins.Metadata.Set(coreRelocationMeta{}, relo) + return ins } -type coreRelocationInfo struct { - relo *CORERelocation - offset asm.RawInstructionOffset +// CORERelocationOffsets is a slice of CORERelocationOffsets sorted by offset. +type CORERelocationOffsets = []CORERelocationOffset + +// CORERelocationOffset represents a CO-RE relocation and an offset at which it +// should be applied. +type CORERelocationOffset struct { + Relo *CORERelocation + Offset asm.RawInstructionOffset } -func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) { +func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*CORERelocationOffset, error) { typ, err := spec.TypeByID(relo.TypeID) if err != nil { return nil, err @@ -744,7 +725,7 @@ func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*cor return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) } - return &coreRelocationInfo{ + return &CORERelocationOffset{ &CORERelocation{ typ, accessor, @@ -755,20 +736,21 @@ func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*cor }, nil } -func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) { - rs := CORERelocationInfos{ - infos: make([]coreRelocationInfo, 0, len(brs)), - } +func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationOffsets, error) { + rs := make(CORERelocationOffsets, 0, len(brs)) + for _, br 
:= range brs { relo, err := newRelocationInfo(br, spec, strings) if err != nil { - return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err) + return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err) } - rs.infos = append(rs.infos, *relo) + rs = append(rs, *relo) } - sort.Slice(rs.infos, func(i, j int) bool { - return rs.infos[i].offset < rs.infos[j].offset + + sort.Slice(rs, func(i, j int) bool { + return rs[i].Offset < rs[j].Offset }) + return rs, nil } diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go index 5b427f5d3..df365d42f 100644 --- a/vendor/github.com/cilium/ebpf/btf/feature.go +++ b/vendor/github.com/cilium/ebpf/btf/feature.go @@ -135,7 +135,7 @@ var haveEnum64 = internal.NewFeatureTest("ENUM64", func() error { }, "6.0") func probeBTF(typ Type) error { - b, err := NewBuilder([]Type{typ}) + b, err := NewBuilder([]Type{typ}, nil) if err != nil { return err } diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go index 308ce8d34..c336a695c 100644 --- a/vendor/github.com/cilium/ebpf/btf/marshal.go +++ b/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -78,6 +78,13 @@ type Builder struct { stableIDs map[Type]TypeID // Explicitly added strings. strings *stringTableBuilder + // Deduplication data structure. + deduper *deduper +} + +type BuilderOptions struct { + // Deduplicate enables type deduplication. + Deduplicate bool } // NewBuilder creates a Builder from a list of types. @@ -85,11 +92,20 @@ type Builder struct { // It is more efficient than calling [Add] individually. // // Returns an error if adding any of the types fails. 
-func NewBuilder(types []Type) (*Builder, error) { +func NewBuilder(types []Type, opts *BuilderOptions) (*Builder, error) { + if opts == nil { + opts = &BuilderOptions{} + } + b := &Builder{ make([]Type, 0, len(types)), make(map[Type]TypeID, len(types)), nil, + nil, + } + + if opts.Deduplicate { + b.deduper = newDeduper() } for _, typ := range types { @@ -117,6 +133,14 @@ func (b *Builder) Add(typ Type) (TypeID, error) { b.stableIDs = make(map[Type]TypeID) } + if b.deduper != nil { + var err error + typ, err = b.deduper.deduplicate(typ) + if err != nil { + return 0, err + } + } + if _, ok := typ.(*Void); ok { // Equality is weird for void, since it is a zero sized type. return 0, nil @@ -144,6 +168,19 @@ func (b *Builder) Add(typ Type) (TypeID, error) { return id, nil } +// Spec marshals the Builder's types and returns a new Spec to query them. +// +// The resulting Spec does not share any state with the Builder, subsequent +// additions to the Builder will not affect the Spec. +func (b *Builder) Spec() (*Spec, error) { + buf, err := b.Marshal(make([]byte, 0), nil) + if err != nil { + return nil, err + } + + return loadRawSpec(buf, nil) +} + // Marshal encodes all types in the Marshaler into BTF wire format. // // opts may be nil. diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go index fc0a59744..143bab609 100644 --- a/vendor/github.com/cilium/ebpf/btf/types.go +++ b/vendor/github.com/cilium/ebpf/btf/types.go @@ -21,15 +21,25 @@ type TypeID = sys.TypeID // Type represents a type described by BTF. // -// Identity of Type follows the [Go specification]: two Types are considered -// equal if they have the same concrete type and the same dynamic value, aka -// they point at the same location in memory. This means that the following -// Types are considered distinct even though they have the same "shape". +// A Type has three properties where compared to other Types. 
+// +// Identity: follows the [Go specification], two Types are considered identical +// if they have the same concrete type and the same dynamic value, aka they point +// at the same location in memory. This means that the following Types are +// considered distinct even though they have the same "shape". // // a := &Int{Size: 1} // b := &Int{Size: 1} // a != b // +// Equivalence: two Types are considered equivalent if they have the same shape +// and thus are functionally interchangeable, even if they are located at different +// memory addresses. The above two Int types are equivalent. +// +// Compatibility: two Types are considered compatible according to the rules of CO-RE; +// see [coreAreTypesCompatible] for details. This is a non-commutative property, +// so A may be compatible with B, but B not compatible with A. +// // [Go specification]: https://go.dev/ref/spec#Comparison_operators type Type interface { // Type can be formatted using the %s and %v verbs. %s outputs only the @@ -50,7 +60,7 @@ type Type interface { // Make a copy of the type, without copying Type members. copy() Type - // New implementations must update walkType. + // New implementations must update children, deduper.typeHash, and typesEquivalent.
} var ( @@ -428,6 +438,8 @@ type Func struct { ParamTags [][]string } +type funcInfoMeta struct{} + func FuncMetadata(ins *asm.Instruction) *Func { fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) return fn diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go index f99f354d4..3e5b05a47 100644 --- a/vendor/github.com/cilium/ebpf/collection.go +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -10,7 +10,6 @@ import ( "slices" "strings" - "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/kallsyms" @@ -73,7 +72,7 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { } for name, spec := range cs.Variables { - cpy.Variables[name] = spec.copy(&cpy) + cpy.Variables[name] = spec.Copy() } if cs.Variables == nil { cpy.Variables = nil @@ -95,97 +94,6 @@ func copyMapOfSpecs[T interface{ Copy() T }](m map[string]T) map[string]T { return cpy } -// RewriteMaps replaces all references to specific maps. -// -// Use this function to use pre-existing maps instead of creating new ones -// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. -// -// Returns an error if a named map isn't used in at least one program. -// -// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection -// instead. 
-func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { - for symbol, m := range maps { - // have we seen a program that uses this symbol / map - seen := false - for progName, progSpec := range cs.Programs { - err := progSpec.Instructions.AssociateMap(symbol, m) - - switch { - case err == nil: - seen = true - - case errors.Is(err, asm.ErrUnreferencedSymbol): - // Not all programs need to use the map - - default: - return fmt.Errorf("program %s: %w", progName, err) - } - } - - if !seen { - return fmt.Errorf("map %s not referenced by any programs", symbol) - } - - // Prevent NewCollection from creating rewritten maps - delete(cs.Maps, symbol) - } - - return nil -} - -// MissingConstantsError is returned by [CollectionSpec.RewriteConstants]. -type MissingConstantsError struct { - // The constants missing from .rodata. - Constants []string -} - -func (m *MissingConstantsError) Error() string { - return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", ")) -} - -// RewriteConstants replaces the value of multiple constants. -// -// The constant must be defined like so in the C program: -// -// volatile const type foobar; -// volatile const type foobar = default; -// -// Replacement values must be of the same length as the C sizeof(type). -// If necessary, they are marshalled according to the same rules as -// map values. -// -// From Linux 5.5 the verifier will use constants to eliminate dead code. -// -// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist. -// -// Deprecated: Use [CollectionSpec.Variables] to interact with constants instead. -// RewriteConstants is now a wrapper around the VariableSpec API. 
-func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { - var missing []string - for n, c := range consts { - v, ok := cs.Variables[n] - if !ok { - missing = append(missing, n) - continue - } - - if !v.Constant() { - return fmt.Errorf("variable %s is not a constant", n) - } - - if err := v.Set(c); err != nil { - return fmt.Errorf("rewriting constant %s: %w", n, err) - } - } - - if len(missing) != 0 { - return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing}) - } - - return nil -} - // Assign the contents of a CollectionSpec to a struct. // // This function is a shortcut to manually checking the presence @@ -514,6 +422,10 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { return m, nil } + if err := mapSpec.updateDataSection(cl.coll.Variables, mapName); err != nil { + return nil, fmt.Errorf("assembling contents of map %s: %w", mapName, err) + } + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.types) if err != nil { return nil, fmt.Errorf("map %s: %w", mapName, err) @@ -597,19 +509,7 @@ func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { return nil, fmt.Errorf("unknown variable %s", varName) } - // Get the key of the VariableSpec's MapSpec in the CollectionSpec. 
- var mapName string - for n, ms := range cl.coll.Maps { - if ms == varSpec.m { - mapName = n - break - } - } - if mapName == "" { - return nil, fmt.Errorf("variable %s: underlying MapSpec %s was removed from CollectionSpec", varName, varSpec.m.Name) - } - - m, err := cl.loadMap(mapName) + m, err := cl.loadMap(varSpec.SectionName) if err != nil { return nil, fmt.Errorf("variable %s: %w", varName, err) } @@ -626,14 +526,14 @@ func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { mm, err = m.Memory() } if err != nil && !errors.Is(err, ErrNotSupported) { - return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, mapName, err) + return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, varSpec.SectionName, err) } v, err := newVariable( - varSpec.name, - varSpec.offset, - varSpec.size, - varSpec.t, + varSpec.Name, + varSpec.Offset, + varSpec.Size(), + varSpec.Type, mm, ) if err != nil { @@ -715,9 +615,9 @@ func (cl *collectionLoader) populateStructOps(m *Map, mapSpec *MapSpec) error { return fmt.Errorf("value should be a *Struct") } - userData, ok := mapSpec.Contents[0].Value.([]byte) - if !ok { - return fmt.Errorf("value should be an array of byte") + userData, err := mapSpec.dataSection() + if err != nil { + return fmt.Errorf("getting data section: %w", err) } if len(userData) < int(userType.Size) { return fmt.Errorf("user data too short: have %d, need at least %d", len(userData), userType.Size) diff --git a/vendor/github.com/cilium/ebpf/collection_windows.go b/vendor/github.com/cilium/ebpf/collection_windows.go index c1bbaa21d..38ce73e8b 100644 --- a/vendor/github.com/cilium/ebpf/collection_windows.go +++ b/vendor/github.com/cilium/ebpf/collection_windows.go @@ -11,8 +11,8 @@ import ( ) func loadCollectionFromNativeImage(file string) (_ *Collection, err error) { - mapFds := make([]efw.FD, 16) - programFds := make([]efw.FD, 16) + mapFds := make([]efw.FD, 32) + programFds := make([]efw.FD, 32) var maps 
map[string]*Map var programs map[string]*Program diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go index f2c9196b7..3f609d2e9 100644 --- a/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -8,6 +8,8 @@ import ( "errors" "fmt" "io" + "iter" + "maps" "math" "os" "slices" @@ -116,8 +118,17 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { case sec.Type == elf.SHT_REL: // Store relocations under the section index of the target relSections[elf.SectionIndex(sec.Info)] = sec - case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: - sections[idx] = newElfSection(sec, programSection) + case sec.Type == elf.SHT_PROGBITS && sec.Size > 0: + if (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0 { + sections[idx] = newElfSection(sec, programSection) + } else if sec.Name == structOpsLinkSec { + // classification is based on section names so that the struct_ops-specific + // section (.struct_ops.link) is correctly recognized + // as non-executable PROGBITS, allowing value placement and link metadata to be loaded.
+ sections[idx] = newElfSection(sec, structOpsSection) + } else if sec.Name == structOpsSec { + return nil, fmt.Errorf("section %q: got '.struct_ops' section: %w", sec.Name, ErrNotSupported) + } } } @@ -186,6 +197,11 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load programs: %w", err) } + // associate members in structs with ProgramSpecs using relocations + if err := ec.associateStructOpsRelocs(progs); err != nil { + return nil, fmt.Errorf("load struct_ops: %w", err) + } + return &CollectionSpec{ ec.maps, progs, @@ -239,6 +255,7 @@ const ( btfMapSection programSection dataSection + structOpsSection ) type elfSection struct { @@ -263,6 +280,17 @@ func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { } } +// symbolsSorted returns the section's symbols sorted by offset. +func (es *elfSection) symbolsSorted() iter.Seq2[uint64, elf.Symbol] { + return func(yield func(uint64, elf.Symbol) bool) { + for _, off := range slices.Sorted(maps.Keys(es.symbols)) { + if !yield(off, es.symbols[off]) { + return + } + } + } +} + // assignSymbols takes a list of symbols and assigns them to their // respective sections, indexed by name. func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { @@ -290,10 +318,11 @@ func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { continue } - // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump - // targets within sections, but BPF has no use for them. - if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && - strings.HasPrefix(symbol.Name, "LBB") { + + // Program sections may contain NOTYPE symbols with local scope, these are + // usually labels for jumps. We do not care for these for the purposes of + // linking and they may overlap with function symbols.
+ if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL { continue } // Only collect symbols that occur in program/maps/data sections. @@ -405,47 +434,99 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { // be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. // // The resulting map is indexed by function name. -func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { - r := bufio.NewReader(section.Open()) +func (ec *elfCode) loadFunctions(sec *elfSection) (map[string]asm.Instructions, error) { + progs := make(map[string]asm.Instructions) + + // Pull out ExtInfos once per section to avoid map lookups on every + // instruction. + fo, lo, ro := ec.extInfo.Section(sec.Name) + + // Raw instruction count since start of the section. ExtInfos point at raw + // insn offsets and ignore the gaps between symbols in case of linked objects. + // We need to count them, we can't obtain this info by any other means. + var raw asm.RawInstructionOffset + + // Sort symbols by offset so we can track instructions by their raw offsets. + for _, sym := range sec.symbolsSorted() { + if progs[sym.Name] != nil { + return nil, fmt.Errorf("duplicate symbol %s in section %s", sym.Name, sec.Name) + } - // Decode the section's instruction stream. - insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) - insns, err := asm.AppendInstructions(insns, r, ec.ByteOrder, platform.Linux) - if err != nil { - return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) - } - if len(insns) == 0 { - return nil, fmt.Errorf("no instructions found in section %s", section.Name) - } + // Decode the symbol's instruction stream, limited to its size. 
+ sr := internal.NewBufferedSectionReader(sec, int64(sym.Value), int64(sym.Size)) + insns := make(asm.Instructions, 0, sym.Size/asm.InstructionSize) + insns, err := asm.AppendInstructions(insns, sr, ec.ByteOrder, platform.Linux) + if err != nil { + return nil, fmt.Errorf("decoding instructions for symbol %s in section %s: %w", sym.Name, sec.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found for symbol %s in section %s", sym.Name, sec.Name) + } - iter := insns.Iterate() - for iter.Next() { - ins := iter.Ins - offset := iter.Offset.Bytes() + // Mark the first instruction as the start of a function. + insns[0] = insns[0].WithSymbol(sym.Name) - // Tag Symbol Instructions. - if sym, ok := section.symbols[offset]; ok { - *ins = ins.WithSymbol(sym.Name) - } + iter := insns.Iterate() + for iter.Next() { + // Global byte offset of the instruction within the ELF section. + offset := sym.Value + iter.Offset.Bytes() - // Apply any relocations for the current instruction. - // If no relocation is present, resolve any section-relative function calls. - if rel, ok := section.relocations[offset]; ok { - if err := ec.relocateInstruction(ins, rel); err != nil { - return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) - } - } else { - if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { - return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) + // Apply any relocations for the current instruction. If no relocation is + // present, resolve any section-relative function calls. 
+ if rel, ok := sec.relocations[offset]; ok { + if err := ec.relocateInstruction(iter.Ins, rel); err != nil { + return nil, fmt.Errorf("offset %d in section %s: relocating instruction: %w", offset, sec.Name, err) + } + } else { + if err := referenceRelativeJump(iter.Ins, offset, sec.symbols); err != nil { + return nil, fmt.Errorf("offset %d in section %s: resolving relative jump: %w", offset, sec.Name, err) + } } + + assignMetadata(iter.Ins, raw, &fo, &lo, &ro) + + raw += iter.Ins.Width() } + + // Emit the program's instructions. + progs[sym.Name] = insns + } + + return progs, nil +} + +// take pops and returns the first item in q if it matches the given predicate +// f. Otherwise, it returns nil. +func take[T any](q *[]T, f func(T) bool) *T { + if q == nil || len(*q) == 0 { + return nil } - if ec.extInfo != nil { - ec.extInfo.Assign(insns, section.Name) + out := (*q)[0] + if f(out) { + *q = (*q)[1:] + return &out } - return splitSymbols(insns) + return nil +} + +// Tag the instruction with any ExtInfo metadata that's pointing at the given +// raw instruction. 
+func assignMetadata(ins *asm.Instruction, raw asm.RawInstructionOffset, + fo *btf.FuncOffsets, lo *btf.LineOffsets, ro *btf.CORERelocationOffsets) { + + if f := take(fo, func(f btf.FuncOffset) bool { return f.Offset == raw }); f != nil { + *ins = btf.WithFuncMetadata(*ins, f.Func) + } + + if l := take(lo, func(l btf.LineOffset) bool { return l.Offset == raw }); l != nil { + *ins = ins.WithSource(l.Line) + } + + if r := take(ro, func(r btf.CORERelocationOffset) bool { return r.Offset == raw }); r != nil { + *ins = btf.WithCORERelocationMetadata(*ins, r.Relo) + } } // referenceRelativeJump turns a relative jump to another bpf subprogram within @@ -573,7 +654,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err switch typ { case elf.STT_NOTYPE, elf.STT_FUNC: - if bind != elf.STB_GLOBAL { + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) } @@ -710,89 +791,91 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err return nil } +// loadMaps iterates over all ELF sections marked as map sections (like .maps) +// and parses each symbol into a MapSpec. func (ec *elfCode) loadMaps() error { for _, sec := range ec.sections { if sec.kind != mapSection { continue } - nSym := len(sec.symbols) - if nSym == 0 { + if len(sec.symbols) == 0 { return fmt.Errorf("section %v: no symbols", sec.Name) } - if sec.Size%uint64(nSym) != 0 { - return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) - } - - // If the ELF has BTF, pull out the btf.Var for each map definition to - // extract decl tags from. 
- varsByName := make(map[string]*btf.Var) - if ec.btf != nil { - var ds *btf.Datasec - if err := ec.btf.TypeByName(sec.Name, &ds); err == nil { - for _, vsi := range ds.Vars { - v, ok := btf.As[*btf.Var](vsi.Type) - if !ok { - return fmt.Errorf("section %v: btf.VarSecInfo doesn't point to a *btf.Var: %T", sec.Name, vsi.Type) - } - varsByName[string(v.Name)] = v - } - } + vars, err := ec.sectionVars(ec.btf, sec.Name) + if err != nil { + return fmt.Errorf("section %v: loading map variable BTF: %w", sec.Name, err) } - var ( - r = bufio.NewReader(sec.Open()) - size = sec.Size / uint64(nSym) - ) - for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { - mapSym, ok := sec.symbols[offset] - if !ok { - return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) - } - - mapName := mapSym.Name - if ec.maps[mapName] != nil { - return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) + for _, sym := range sec.symbols { + name := sym.Name + if ec.maps[name] != nil { + return fmt.Errorf("duplicate symbol %s in section %s", name, sec.Name) } - lr := io.LimitReader(r, int64(size)) + sr := internal.NewBufferedSectionReader(sec, int64(sym.Value), int64(sym.Size)) spec := MapSpec{ - Name: sanitizeName(mapName, -1), + Name: sanitizeName(name, -1), } switch { - case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: - return fmt.Errorf("map %s: missing type", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: - return fmt.Errorf("map %s: missing key size", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: - return fmt.Errorf("map %s: missing value size", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: - return fmt.Errorf("map %s: missing max entries", mapName) - case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: - return fmt.Errorf("map %s: missing flags", mapName) - } - - extra, err := io.ReadAll(lr) + case binary.Read(sr, ec.ByteOrder, &spec.Type) != nil: + 
return fmt.Errorf("map %s: missing type", name) + case binary.Read(sr, ec.ByteOrder, &spec.KeySize) != nil: + return fmt.Errorf("map %s: missing key size", name) + case binary.Read(sr, ec.ByteOrder, &spec.ValueSize) != nil: + return fmt.Errorf("map %s: missing value size", name) + case binary.Read(sr, ec.ByteOrder, &spec.MaxEntries) != nil: + return fmt.Errorf("map %s: missing max entries", name) + case binary.Read(sr, ec.ByteOrder, &spec.Flags) != nil: + return fmt.Errorf("map %s: missing flags", name) + } + + extra, err := io.ReadAll(sr) if err != nil { - return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + return fmt.Errorf("map %s: reading map tail: %w", name, err) } if len(extra) > 0 { spec.Extra = bytes.NewReader(extra) } - if v, ok := varsByName[mapName]; ok { + if v, ok := vars[name]; ok { spec.Tags = slices.Clone(v.Tags) } - ec.maps[mapName] = &spec + ec.maps[name] = &spec } } return nil } +// sectionVars looks up the BTF Datasec for the given section name and returns a +// map of variable names to their btf.Var definitions. +func (ec *elfCode) sectionVars(spec *btf.Spec, sec string) (map[string]*btf.Var, error) { + vars := make(map[string]*btf.Var) + + if spec == nil { + return vars, nil + } + + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec, &ds); err != nil { + return vars, nil + } + + for _, vsi := range ds.Vars { + v, ok := btf.As[*btf.Var](vsi.Type) + if !ok { + return nil, fmt.Errorf("btf.VarSecInfo doesn't point to a *btf.Var: %T", vsi.Type) + } + vars[string(v.Name)] = v + } + + return vars, nil +} + // loadBTFMaps iterates over all ELF sections marked as BTF map sections // (like .maps) and parses them into MapSpecs. Dump the .maps section and // any relocations with `readelf -x .maps -r `. @@ -806,32 +889,36 @@ func (ec *elfCode) loadBTFMaps() error { return fmt.Errorf("missing BTF") } - // Each section must appear as a DataSec in the ELF's BTF blob. 
- var ds *btf.Datasec - if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { - return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) + vars, err := ec.sectionVars(ec.btf, sec.Name) + if err != nil { + return fmt.Errorf("section %v: loading map variable BTF: %w", sec.Name, err) } - // Open a Reader to the ELF's raw section bytes so we can assert that all - // of them are zero on a per-map (per-Var) basis. For now, the section's - // sole purpose is to receive relocations, so all must be zero. - rs := sec.Open() + if len(vars) != len(sec.symbols) { + return fmt.Errorf("section %v: contains %d symbols but %d btf.Vars", sec.Name, len(sec.symbols), len(vars)) + } - for _, vs := range ds.Vars { - // BPF maps are declared as and assigned to global variables, - // so iterate over each Var in the DataSec and validate their types. - v, ok := vs.Type.(*btf.Var) + syms := make(map[string]elf.Symbol) + for _, sym := range sec.symbols { + syms[sym.Name] = sym + } + + for _, v := range vars { + name := v.Name + + // Find the ELF symbol corresponding to this Var. + sym, ok := syms[name] if !ok { - return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) + return fmt.Errorf("section %v: missing symbol for map %s", sec.Name, name) } - name := string(v.Name) + + sr := internal.NewBufferedSectionReader(sec, int64(sym.Value), int64(sym.Size)) // The BTF metadata for each Var contains the full length of the map // declaration, so read the corresponding amount of bytes from the ELF. // This way, we can pinpoint which map declaration contains unexpected // (and therefore unsupported) data. 
- _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) - if err != nil { + if _, err = io.Copy(internal.DiscardZeroes{}, sr); err != nil { return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) } @@ -845,22 +932,12 @@ func (ec *elfCode) loadBTFMaps() error { return fmt.Errorf("expected struct, got %s", v.Type) } - mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) + spec, err := mapSpecFromBTF(sec, sym, v, mapStruct, ec.btf, name, false) if err != nil { return fmt.Errorf("map %v: %w", name, err) } - ec.maps[name] = mapSpec - } - - // Drain the ELF section reader to make sure all bytes are accounted for - // with BTF metadata. - i, err := io.Copy(io.Discard, rs) - if err != nil { - return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) - } - if i > 0 { - return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + ec.maps[name] = spec } } @@ -870,7 +947,7 @@ func (ec *elfCode) loadBTFMaps() error { // mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing // a BTF map definition. The name and spec arguments will be copied to the // resulting MapSpec, and inner must be true on any recursive invocations. -func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { +func mapSpecFromBTF(es *elfSection, sym elf.Symbol, v *btf.Var, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { var ( key, value btf.Type keySize, valueSize uint64 @@ -1014,7 +1091,7 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b // on kernels 5.2 and up) // Pass the BTF spec from the parent object, since both parent and // child must be created from the same BTF blob (on kernels that support BTF). 
- innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) + innerMapSpec, err = mapSpecFromBTF(es, sym, v, t, spec, name+"_inner", true) if err != nil { return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) } @@ -1030,7 +1107,7 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) } - contents, err = resolveBTFValuesContents(es, vs, member) + contents, err = resolveBTFValuesContents(es, sym, member) if err != nil { return nil, fmt.Errorf("resolving values contents: %w", err) } @@ -1053,11 +1130,6 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b valueSize = 0 } - v, ok := btf.As[*btf.Var](vs.Type) - if !ok { - return nil, fmt.Errorf("BTF map definition: btf.VarSecInfo doesn't point to a *btf.Var: %T", vs.Type) - } - return &MapSpec{ Name: sanitizeName(name, -1), Type: MapType(mapType), @@ -1119,58 +1191,70 @@ func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { return ptr.Target, nil } -// resolveBTFValuesContents resolves relocations into ELF sections belonging -// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map -// definitions to extract static declarations of map contents. -func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { - // The elements of a .values pointer array are not encoded in BTF. - // Instead, relocations are generated into each array index. - // However, it's possible to leave certain array indices empty, so all - // indices' offsets need to be checked for emitted relocations. - - // The offset of the 'values' member within the _struct_ (in bits) - // is the starting point of the array. Convert to bytes. Add VarSecinfo - // offset to get the absolute position in the ELF blob. 
- start := member.Offset.Bytes() + vs.Offset - // 'values' is encoded in BTF as a zero (variable) length struct - // member, and its contents run until the end of the VarSecinfo. - // Add VarSecinfo offset to get the absolute position in the ELF blob. - end := vs.Size + vs.Offset - // The size of an address in this section. This determines the width of - // an index in the array. - align := uint32(es.Addralign) - - // Check if variable-length section is aligned. - if (end-start)%align != 0 { - return nil, errors.New("unaligned static values section") - } +// valuesRelocations returns an iterator over the relocations in the ELF section +// corresponding to the elements of a .values array in a BTF map definition. Each +// iteration yields the array index and the symbol referenced by the relocation +// at that index. Empty indices are skipped. +func valuesRelocations(es *elfSection, sym elf.Symbol, member btf.Member) iter.Seq2[uint32, elf.Symbol] { + // The elements of a .values pointer array are not encoded in BTF itself. + // Instead, each array index receives a relocation pointing at a symbol + // (map/prog) in another section. However, it's possible to leave certain + // array indices empty, so all indices' offsets need to be checked for emitted + // relocations. + + // Absolute offset of the .values member within the section. + start := sym.Value + uint64(member.Offset.Bytes()) + + // .values is a variable-length struct member, so its contents run until the + // end of the symbol. The symbol offset + size is the absolute offset of the + // end of the array in the section. + end := sym.Value + sym.Size + + // The size of an address in this section. This determines the width of an + // index in the array. + align := es.Addralign + + // Amount of elements in the .values array. 
elems := (end - start) / align - if elems == 0 { - return nil, nil - } + return func(yield func(uint32, elf.Symbol) bool) { + for i := range uint32(elems) { + // off increases by align on each iteration, starting at .values. + off := start + (uint64(i) * align) - contents := make([]MapKV, 0, elems) + r, ok := es.relocations[off] + if !ok { + continue + } - // k is the array index, off is its corresponding ELF section offset. - for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { - r, ok := es.relocations[uint64(off)] - if !ok { - continue + if !yield(i, r) { + return + } } + } + +} - // Relocation exists for the current offset in the ELF section. - // Emit a value stub based on the type of relocation to be replaced by - // a real fd later in the pipeline before populating the map. - // Map keys are encoded in MapKV entries, so empty array indices are - // skipped here. - switch t := elf.ST_TYPE(r.Info); t { +// resolveBTFValuesContents looks up the symbols referenced by the relocations +// in a .values array and returns them as MapKV pairs, where the key is the +// array index and the value is the symbol name. Empty indices are skipped. +func resolveBTFValuesContents(es *elfSection, sym elf.Symbol, member btf.Member) ([]MapKV, error) { + var contents []MapKV + + if member.Offset.Bytes() > uint32(sym.Size) { + return nil, fmt.Errorf("member offset %d exceeds symbol size %d", member.Offset.Bytes(), sym.Size) + } + + for i, sym := range valuesRelocations(es, sym, member) { + // Emit a value stub based on the type of relocation to be replaced by a + // real fd later in the pipeline before populating the Map. 
+ switch t := elf.ST_TYPE(sym.Info); t { case elf.STT_FUNC: - contents = append(contents, MapKV{uint32(k), r.Name}) + contents = append(contents, MapKV{i, sym.Name}) case elf.STT_OBJECT: - contents = append(contents, MapKV{uint32(k), r.Name}) + contents = append(contents, MapKV{i, sym.Name}) default: - return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name) + return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, sym.Name) } } @@ -1210,14 +1294,15 @@ func (ec *elfCode) loadDataSections() error { mapSpec.Flags = sys.BPF_F_RDONLY_PROG } + var data []byte switch sec.Type { // Only open the section if we know there's actual data to be read. case elf.SHT_PROGBITS: - data, err := sec.Data() + var err error + data, err = sec.Data() if err != nil { return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) } - mapSpec.Contents = []MapKV{{uint32(0), data}} case elf.SHT_NOBITS: // NOBITS sections like .bss contain only zeroes and are not allocated in @@ -1225,12 +1310,14 @@ func (ec *elfCode) loadDataSections() error { // them. Don't attempt reading zeroes from the ELF, instead allocate the // zeroed memory to support getting and setting VariableSpecs for sections // like .bss. - mapSpec.Contents = []MapKV{{uint32(0), make([]byte, sec.Size)}} + data = make([]byte, sec.Size) default: return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type) } + mapSpec.Contents = []MapKV{{uint32(0), data}} + for off, sym := range sec.symbols { // Skip symbols marked with the 'hidden' attribute. 
if elf.ST_VISIBILITY(sym.Other) == elf.STV_HIDDEN || @@ -1259,11 +1346,19 @@ func (ec *elfCode) loadDataSections() error { continue } + if off+sym.Size > uint64(len(data)) { + return fmt.Errorf("data section %s: variable %s exceeds section bounds", sec.Name, sym.Name) + } + + if off > math.MaxUint32 { + return fmt.Errorf("data section %s: variable %s offset %d exceeds maximum", sec.Name, sym.Name, off) + } + ec.vars[sym.Name] = &VariableSpec{ - name: sym.Name, - offset: off, - size: sym.Size, - m: mapSpec, + SectionName: sec.Name, + Name: sym.Name, + Offset: uint32(off), + Value: slices.Clone(data[off : off+sym.Size]), } } @@ -1294,17 +1389,17 @@ func (ec *elfCode) loadDataSections() error { continue } - if uint64(v.Offset) != ev.offset { - return fmt.Errorf("data section %s: variable %s datasec offset (%d) doesn't match ELF symbol offset (%d)", sec.Name, name, v.Offset, ev.offset) + if v.Offset != ev.Offset { + return fmt.Errorf("data section %s: variable %s datasec offset (%d) doesn't match ELF symbol offset (%d)", sec.Name, name, v.Offset, ev.Offset) } - if uint64(v.Size) != ev.size { - return fmt.Errorf("data section %s: variable %s size in datasec (%d) doesn't match ELF symbol size (%d)", sec.Name, name, v.Size, ev.size) + if v.Size != ev.Size() { + return fmt.Errorf("data section %s: variable %s size in datasec (%d) doesn't match ELF symbol size (%d)", sec.Name, name, v.Size, ev.Size()) } // Decouple the Var in the VariableSpec from the underlying DataSec in // the MapSpec to avoid modifications from affecting map loads later on. - ev.t = btf.Copy(vt).(*btf.Var) + ev.Type = btf.Copy(vt).(*btf.Var) } } } @@ -1379,6 +1474,91 @@ func (ec *elfCode) loadKsymsSection() error { return nil } +// associateStructOpsRelocs handles `.struct_ops.link` +// and associates the target function with the correct struct member in the map. 
+func (ec *elfCode) associateStructOpsRelocs(progs map[string]*ProgramSpec) error { + for _, sec := range ec.sections { + if sec.kind != structOpsSection { + continue + } + + userData, err := sec.Data() + if err != nil { + return fmt.Errorf("failed to read section data: %w", err) + } + + // Resolve the BTF datasec describing variables in this section. + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { + return fmt.Errorf("datasec %s: %w", sec.Name, err) + } + + // Set flags for .struct_ops.link (BPF_F_LINK). + flags := uint32(0) + if sec.Name == structOpsLinkSec { + flags = sys.BPF_F_LINK + } + + for _, vsi := range ds.Vars { + userSt, baseOff, err := ec.createStructOpsMap(vsi, userData, flags) + if err != nil { + return err + } + + if err := structOpsSetAttachTo(sec, baseOff, userSt, progs); err != nil { + return err + } + } + } + + return nil +} + +// createStructOpsMap() creates and registers a MapSpec for a struct_ops +func (ec *elfCode) createStructOpsMap(vsi btf.VarSecinfo, userData []byte, flags uint32) (*btf.Struct, uint32, error) { + varType, ok := btf.As[*btf.Var](vsi.Type) + if !ok { + return nil, 0, fmt.Errorf("vsi: expect var, got %T", vsi.Type) + } + + mapName := varType.Name + + userSt, ok := btf.As[*btf.Struct](varType.Type) + if !ok { + return nil, 0, fmt.Errorf("var %s: expect struct, got %T", varType.Name, varType.Type) + } + + userSize := userSt.Size + baseOff := vsi.Offset + if baseOff+userSize > uint32(len(userData)) { + return nil, 0, fmt.Errorf("%s exceeds section", mapName) + } + + // Register the MapSpec for this struct_ops instance if doesn't exist + if _, exists := ec.maps[mapName]; exists { + return nil, 0, fmt.Errorf("struct_ops map %s: already exists", mapName) + } + + ec.maps[mapName] = &MapSpec{ + Name: mapName, + Type: StructOpsMap, + Key: &btf.Int{Size: 4}, + KeySize: structOpsKeySize, + ValueSize: userSize, // length of the user-struct type + Value: userSt, + Flags: flags, + MaxEntries: 1, + 
Contents: []MapKV{ + { + Key: uint32(0), + Value: append([]byte(nil), userData[baseOff:baseOff+userSize]...), + }, + }, + } + + return userSt, baseOff, nil +} + type libbpfElfSectionDef struct { pattern string programType sys.ProgType @@ -1399,29 +1579,9 @@ const ( _SEC_XDP_FRAGS _SEC_USDT - // Ignore any present extra in order to preserve backwards compatibility - // with earlier versions of the library. - ignoreExtra - _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT ) -func init() { - // Compatibility with older versions of the library. - // We prepend libbpf definitions since they contain a prefix match - // for "xdp". - elfSectionDefs = append([]libbpfElfSectionDef{ - {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra}, - {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, - {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0}, - {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, - {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0}, - // This has been in the library since the beginning of time. Not sure - // where it came from. - {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, - }, elfSectionDefs...) -} - func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { // Skip optional program marking for now. sectionName = strings.TrimPrefix(sectionName, "?") @@ -1442,15 +1602,13 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { if t.flags&_SEC_XDP_FRAGS > 0 { flags |= sys.BPF_F_XDP_HAS_FRAGS } - if t.flags&_SEC_EXP_ATTACH_OPT > 0 { - if programType == XDP { - // The library doesn't yet have code to fallback to not specifying - // attach type. Only do this for XDP since we've enforced correct - // attach type for all other program types. 
- attachType = AttachNone - } - } - if t.flags&ignoreExtra > 0 { + + // The libbpf documentation on program types states: 'The struct_ops attach + // format supports struct_ops[.s]/ convention, but name is ignored and + // it is recommended to just use plain SEC("struct_ops[.s]").' + // + // Ignore any extra for struct_ops to match libbpf behaviour. + if programType == StructOps { extra = "" } diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go index 43dcfb103..e48b84a79 100644 --- a/vendor/github.com/cilium/ebpf/elf_sections.go +++ b/vendor/github.com/cilium/ebpf/elf_sections.go @@ -21,8 +21,10 @@ var elfSectionDefs = []libbpfElfSectionDef{ {"kprobe.session+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_SESSION, _SEC_NONE}, {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uprobe.session+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_SESSION, _SEC_NONE}, {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"uprobe.session.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_SESSION, _SEC_SLEEPABLE}, {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT}, diff --git a/vendor/github.com/cilium/ebpf/features/doc.go b/vendor/github.com/cilium/ebpf/features/doc.go new file mode 100644 index 000000000..acc57e3b1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/doc.go @@ -0,0 +1,19 @@ +// Package features allows probing for BPF features available to the calling process. 
+// +// In general, the error return values from feature probes in this package +// all have the following semantics unless otherwise specified: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that resource +// creation may succeed despite an error being returned. For example, some +// map and program types cannot reliably be probed and will return an +// inconclusive error. +// +// As a rule, only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached by the library and persist throughout any changes +// to the process' environment, like capability changes. +package features diff --git a/vendor/github.com/cilium/ebpf/features/link.go b/vendor/github.com/cilium/ebpf/features/link.go new file mode 100644 index 000000000..4f440e7bc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/link.go @@ -0,0 +1,157 @@ +package features + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveBPFLinkUprobeMulti probes the running kernel if uprobe_multi link is supported. +// +// See the package documentation for the meaning of the error return value. +func HaveBPFLinkUprobeMulti() error { + return haveBPFLinkUprobeMulti() +} + +var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_upm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceUprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. 
+ return ebpf.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + // We try to create uprobe multi link on '/' path which results in + // error with -EBADF in case uprobe multi link is supported. + fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + Path: sys.NewStringPointer("/"), + Offsets: sys.SlicePointer([]uint64{0}), + Count: 1, + }) + switch { + case errors.Is(err, unix.EBADF): + return nil + case errors.Is(err, unix.EINVAL): + return ebpf.ErrNotSupported + case err != nil: + return err + } + + // should not happen + fd.Close() + return errors.New("successfully attached uprobe_multi to /, kernel bug?") +}, "6.6") + +// HaveBPFLinkKprobeMulti probes the running kernel if kprobe_multi link is supported. +// +// See the package documentation for the meaning of the error return value. +func HaveBPFLinkKprobeMulti() error { + return haveBPFLinkKprobeMulti() +} + +var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kpm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return ebpf.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return ebpf.ErrNotSupported + // If CONFIG_FPROBE isn't set. 
+ case errors.Is(err, unix.EOPNOTSUPP): + return ebpf.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}, "5.18") + +// HaveBPFLinkKprobeSession probes the running kernel if kprobe_session link is supported. +// +// See the package documentation for the meaning of the error return value. +func HaveBPFLinkKprobeSession() error { + return haveBPFLinkKprobeSession() +} + +var haveBPFLinkKprobeSession = internal.NewFeatureTest("bpf_link_kprobe_session", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kps_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeSession, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return ebpf.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_SESSION, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return ebpf.ErrNotSupported + // If CONFIG_FPROBE isn't set. + case errors.Is(err, unix.EOPNOTSUPP): + return ebpf.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}, "6.10") diff --git a/vendor/github.com/cilium/ebpf/features/map.go b/vendor/github.com/cilium/ebpf/features/map.go new file mode 100644 index 000000000..75212552c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/map.go @@ -0,0 +1,362 @@ +package features + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveMapType probes the running kernel for the availability of the specified map type. 
+// +// See the package documentation for the meaning of the error return value. +func HaveMapType(mt ebpf.MapType) error { + return haveMapTypeMatrix.Result(mt) +} + +func probeCgroupStorageMap(mt sys.MapType) error { + // keySize needs to be sizeof(struct{u32 + u64}) = 12 (+ padding = 16) + // by using unsafe.Sizeof(int) we are making sure that this works on 32bit and 64bit archs + return createMap(&sys.MapCreateAttr{ + MapType: mt, + ValueSize: 4, + KeySize: uint32(8 + unsafe.Sizeof(int(0))), + MaxEntries: 0, + }) +} + +func probeStorageMap(mt sys.MapType) error { + // maxEntries needs to be 0 + // BPF_F_NO_PREALLOC needs to be set + // btf* fields need to be set + // see alloc_check for local_storage map types + err := createMap(&sys.MapCreateAttr{ + MapType: mt, + KeySize: 4, + ValueSize: 4, + MaxEntries: 0, + MapFlags: sys.BPF_F_NO_PREALLOC, + BtfKeyTypeId: 1, + BtfValueTypeId: 1, + BtfFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + // Triggered by BtfFd. + return nil + } + return err +} + +func probeNestedMap(mt sys.MapType) error { + // assign invalid innerMapFd to pass validation check + // will return EBADF + err := probeMap(&sys.MapCreateAttr{ + MapType: mt, + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + return nil + } + return err +} + +func probeMap(attr *sys.MapCreateAttr) error { + if attr.KeySize == 0 { + attr.KeySize = 4 + } + if attr.ValueSize == 0 { + attr.ValueSize = 4 + } + attr.MaxEntries = 1 + return createMap(attr) +} + +func createMap(attr *sys.MapCreateAttr) error { + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + return nil + } + + switch { + // EINVAL occurs when attempting to create a map with an unknown type. + // E2BIG occurs when MapCreateAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given map type. 
+ case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + return ebpf.ErrNotSupported + } + + return err +} + +var haveMapTypeMatrix = internal.FeatureMatrix[ebpf.MapType]{ + ebpf.Hash: {Version: "3.19"}, + ebpf.Array: {Version: "3.19"}, + ebpf.ProgramArray: {Version: "4.2"}, + ebpf.PerfEventArray: {Version: "4.3"}, + ebpf.PerCPUHash: {Version: "4.6"}, + ebpf.PerCPUArray: {Version: "4.6"}, + ebpf.StackTrace: { + Version: "4.6", + Fn: func() error { + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK_TRACE, + ValueSize: 8, // sizeof(uint64) + }) + }, + }, + ebpf.CGroupArray: {Version: "4.8"}, + ebpf.LRUHash: {Version: "4.10"}, + ebpf.LRUCPUHash: {Version: "4.10"}, + ebpf.LPMTrie: { + Version: "4.11", + Fn: func() error { + // keySize and valueSize need to be sizeof(struct{u32 + u8}) + 1 + padding = 8 + // BPF_F_NO_PREALLOC needs to be set + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_LPM_TRIE, + KeySize: 8, + ValueSize: 8, + MapFlags: sys.BPF_F_NO_PREALLOC, + }) + }, + }, + ebpf.ArrayOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_ARRAY_OF_MAPS) }, + }, + ebpf.HashOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_HASH_OF_MAPS) }, + }, + ebpf.DevMap: {Version: "4.14"}, + ebpf.SockMap: {Version: "4.14"}, + ebpf.CPUMap: {Version: "4.15"}, + ebpf.XSKMap: {Version: "4.18"}, + ebpf.SockHash: {Version: "4.18"}, + ebpf.CGroupStorage: { + Version: "4.19", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_CGROUP_STORAGE) }, + }, + ebpf.ReusePortSockArray: {Version: "4.19"}, + ebpf.PerCPUCGroupStorage: { + Version: "4.20", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) }, + }, + ebpf.Queue: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_QUEUE, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.Stack: { + Version: 
"4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.SkStorage: { + Version: "5.2", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_SK_STORAGE) }, + }, + ebpf.DevMapHash: {Version: "5.4"}, + ebpf.StructOpsMap: { + Version: "5.6", + Fn: func() error { + // StructOps requires setting a vmlinux type id, but id 1 will always + // resolve to some type of integer. This will cause ENOTSUPP. + err := probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STRUCT_OPS, + BtfVmlinuxValueTypeId: 1, + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the map type is at least known to the kernel. + return nil + } + return err + }, + }, + ebpf.RingBuf: { + Version: "5.8", + Fn: func() error { + // keySize and valueSize need to be 0 + // maxEntries needs to be power of 2 and PAGE_ALIGNED + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_RINGBUF, + KeySize: 0, + ValueSize: 0, + MaxEntries: uint32(os.Getpagesize()), + }) + }, + }, + ebpf.InodeStorage: { + Version: "5.10", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_INODE_STORAGE) }, + }, + ebpf.TaskStorage: { + Version: "5.11", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_TASK_STORAGE) }, + }, + ebpf.BloomFilter: { + Version: "5.16", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_BLOOM_FILTER, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.UserRingbuf: { + Version: "6.1", + Fn: func() error { + // keySize and valueSize need to be 0 + // maxEntries needs to be power of 2 and PAGE_ALIGNED + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_USER_RINGBUF, + KeySize: 0, + ValueSize: 0, + MaxEntries: uint32(os.Getpagesize()), + }) + }, + }, + ebpf.CgroupStorage: { + Version: "6.2", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_CGRP_STORAGE) }, + }, + 
ebpf.Arena: { + Version: "6.9", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_ARENA, + KeySize: 0, + ValueSize: 0, + MaxEntries: 1, // one page + MapExtra: 0, // can mmap() at any address + MapFlags: sys.BPF_F_MMAPABLE, + }) + }, + }, +} + +func init() { + for mt, ft := range haveMapTypeMatrix { + ft.Name = mt.String() + if ft.Fn == nil { + // Avoid referring to the loop variable in the closure. + mt := sys.MapType(mt) + ft.Fn = func() error { return probeMap(&sys.MapCreateAttr{MapType: mt}) } + } + } +} + +// MapFlags document which flags may be feature probed. +type MapFlags uint32 + +// Flags which may be feature probed. +const ( + BPF_F_NO_PREALLOC = sys.BPF_F_NO_PREALLOC + BPF_F_RDONLY_PROG = sys.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = sys.BPF_F_WRONLY_PROG + BPF_F_MMAPABLE = sys.BPF_F_MMAPABLE + BPF_F_INNER_MAP = sys.BPF_F_INNER_MAP +) + +// HaveMapFlag probes the running kernel for the availability of the specified map flag. +// +// Returns an error if flag is not one of the flags declared in this package. +// See the package documentation for the meaning of the error return value. +func HaveMapFlag(flag MapFlags) (err error) { + return haveMapFlagsMatrix.Result(flag) +} + +func probeMapFlag(attr *sys.MapCreateAttr) error { + // For now, we do not check if the map type is supported because we only support + // probing for flags defined on arrays and hashes that are always supported. + // In the future, if we allow probing on flags defined on newer types, checking for map type + // support will be required. + if attr.MapType == sys.BPF_MAP_TYPE_UNSPEC { + attr.MapType = sys.BPF_MAP_TYPE_ARRAY + } + + attr.KeySize = 4 + attr.ValueSize = 4 + attr.MaxEntries = 1 + + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + } else if errors.Is(err, unix.EINVAL) { + // EINVAL occurs when attempting to create a map with an unknown type or an unknown flag. 
+ err = ebpf.ErrNotSupported + } + + return err +} + +var haveMapFlagsMatrix = internal.FeatureMatrix[MapFlags]{ + BPF_F_NO_PREALLOC: { + Version: "4.6", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_HASH, + MapFlags: BPF_F_NO_PREALLOC, + }) + }, + }, + BPF_F_RDONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_RDONLY_PROG, + }) + }, + }, + BPF_F_WRONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_WRONLY_PROG, + }) + }, + }, + BPF_F_MMAPABLE: { + Version: "5.5", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_MMAPABLE, + }) + }, + }, + BPF_F_INNER_MAP: { + Version: "5.10", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_INNER_MAP, + }) + }, + }, +} + +func init() { + for mf, ft := range haveMapFlagsMatrix { + ft.Name = fmt.Sprint(mf) + } +} diff --git a/vendor/github.com/cilium/ebpf/features/misc.go b/vendor/github.com/cilium/ebpf/features/misc.go new file mode 100644 index 000000000..c039020a9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/misc.go @@ -0,0 +1,135 @@ +package features + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// HaveLargeInstructions probes the running kernel if more than 4096 instructions +// per program are supported. +// +// Upstream commit c04c0d2b968a ("bpf: increase complexity limit and maximum program size"). +// +// See the package documentation for the meaning of the error return value. 
+func HaveLargeInstructions() error { + return haveLargeInstructions() +} + +var haveLargeInstructions = internal.NewFeatureTest(">4096 instructions", func() error { + const maxInsns = 4096 + + insns := make(asm.Instructions, maxInsns, maxInsns+1) + for i := range insns { + insns[i] = asm.Mov.Imm(asm.R0, 1) + } + insns = append(insns, asm.Return()) + + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: insns, + }) +}, "5.2") + +// HaveBoundedLoops probes the running kernel if bounded loops are supported. +// +// Upstream commit 2589726d12a1 ("bpf: introduce bounded loops"). +// +// See the package documentation for the meaning of the error return value. +func HaveBoundedLoops() error { + return haveBoundedLoops() +} + +var haveBoundedLoops = internal.NewFeatureTest("bounded loops", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 10), + asm.Sub.Imm(asm.R0, 1).WithSymbol("loop"), + asm.JNE.Imm(asm.R0, 0, "loop"), + asm.Return(), + }, + }) +}, "5.3") + +// HaveV2ISA probes the running kernel if instructions of the v2 ISA are supported. +// +// Upstream commit 92b31a9af73b ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions"). +// +// See the package documentation for the meaning of the error return value. +func HaveV2ISA() error { + return haveV2ISA() +} + +var haveV2ISA = internal.NewFeatureTest("v2 ISA", func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) + // This sometimes bubbles up from the JIT on aarch64. + if errors.Is(err, sys.ENOTSUPP) { + return ebpf.ErrNotSupported + } + return err +}, "4.14") + +// HaveV3ISA probes the running kernel if instructions of the v3 ISA are supported. +// +// Upstream commit 092ed0968bb6 ("bpf: verifier support JMP32"). 
+// +// See the package documentation for the meaning of the error return value. +func HaveV3ISA() error { + return haveV3ISA() +} + +var haveV3ISA = internal.NewFeatureTest("v3 ISA", func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm32(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) + // This sometimes bubbles up from the JIT on aarch64. + if errors.Is(err, sys.ENOTSUPP) { + return ebpf.ErrNotSupported + } + return err +}, "5.1") + +// HaveV4ISA probes the running kernel if instructions of the v4 ISA are supported. +// +// Upstream commit 1f9a1ea821ff ("bpf: Support new sign-extension load insns"). +// +// See the package documentation for the meaning of the error return value. +func HaveV4ISA() error { + return haveV4ISA() +} + +var haveV4ISA = internal.NewFeatureTest("v4 ISA", func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JEq.Imm(asm.R0, 1, "error"), + asm.LongJump("exit"), + asm.Mov.Imm(asm.R0, 1).WithSymbol("error"), + asm.Return().WithSymbol("exit"), + }, + }) + // This sometimes bubbles up from the JIT on aarch64. 
+ if errors.Is(err, sys.ENOTSUPP) { + return ebpf.ErrNotSupported + } + return err +}, "6.6") diff --git a/vendor/github.com/cilium/ebpf/features/prog.go b/vendor/github.com/cilium/ebpf/features/prog.go new file mode 100644 index 000000000..6441d5931 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/prog.go @@ -0,0 +1,332 @@ +package features + +import ( + "errors" + "fmt" + "slices" + "strings" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveProgramType probes the running kernel for the availability of the specified program type. +// +// See the package documentation for the meaning of the error return value. +func HaveProgramType(pt ebpf.ProgramType) (err error) { + return haveProgramTypeMatrix.Result(pt) +} + +func probeProgram(spec *ebpf.ProgramSpec) error { + if spec.Instructions == nil { + spec.Instructions = asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + } + } + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EINVAL occurs when attempting to create a program with an unknown type. + // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. 
+ case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + err = ebpf.ErrNotSupported + } + + return err +} + +var haveProgramTypeMatrix = internal.FeatureMatrix[ebpf.ProgramType]{ + ebpf.SocketFilter: {Version: "3.19"}, + ebpf.Kprobe: {Version: "4.1"}, + ebpf.SchedCLS: {Version: "4.1"}, + ebpf.SchedACT: {Version: "4.1"}, + ebpf.TracePoint: {Version: "4.7"}, + ebpf.XDP: {Version: "4.8"}, + ebpf.PerfEvent: {Version: "4.9"}, + ebpf.CGroupSKB: {Version: "4.10"}, + ebpf.CGroupSock: {Version: "4.10"}, + ebpf.LWTIn: {Version: "4.10"}, + ebpf.LWTOut: {Version: "4.10"}, + ebpf.LWTXmit: {Version: "4.10"}, + ebpf.SockOps: {Version: "4.13"}, + ebpf.SkSKB: {Version: "4.14"}, + ebpf.CGroupDevice: {Version: "4.15"}, + ebpf.SkMsg: {Version: "4.17"}, + ebpf.RawTracepoint: {Version: "4.17"}, + ebpf.CGroupSockAddr: { + Version: "4.17", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockAddr, + AttachType: ebpf.AttachCGroupInet4Connect, + }) + }, + }, + ebpf.LWTSeg6Local: {Version: "4.18"}, + ebpf.LircMode2: {Version: "4.18"}, + ebpf.SkReuseport: {Version: "4.19"}, + ebpf.FlowDissector: {Version: "4.20"}, + ebpf.CGroupSysctl: {Version: "5.2"}, + ebpf.RawTracepointWritable: {Version: "5.2"}, + ebpf.CGroupSockopt: { + Version: "5.3", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockopt, + AttachType: ebpf.AttachCGroupGetsockopt, + }) + }, + }, + ebpf.Tracing: { + Version: "5.5", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Tracing, + AttachType: ebpf.AttachTraceFEntry, + AttachTo: "bpf_init", + }) + }, + }, + ebpf.StructOps: { + Version: "5.6", + Fn: func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.StructOps, + License: "GPL", + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the program type is at least known to the kernel. 
+ return nil + } + return err + }, + }, + ebpf.Extension: { + Version: "5.6", + Fn: func() error { + // create btf.Func to add to first ins of target and extension so both progs are btf powered + btfFn := btf.Func{ + Name: "a", + Type: &btf.FuncProto{ + Return: &btf.Int{}, + Params: []btf.FuncParam{ + {Name: "ctx", Type: &btf.Pointer{Target: &btf.Struct{Name: "xdp_md"}}}, + }, + }, + Linkage: btf.GlobalFunc, + } + insns := asm.Instructions{ + btf.WithFuncMetadata(asm.Mov.Imm(asm.R0, 0), &btfFn), + asm.Return(), + } + + // create target prog + prog, err := ebpf.NewProgramWithOptions( + &ebpf.ProgramSpec{ + Type: ebpf.XDP, + Instructions: insns, + }, + ebpf.ProgramOptions{ + LogDisabled: true, + }, + ) + if err != nil { + return err + } + defer prog.Close() + + // probe for Extension prog with target + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Extension, + Instructions: insns, + AttachTarget: prog, + AttachTo: btfFn.Name, + }) + }, + }, + ebpf.LSM: { + Version: "5.7", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.LSM, + AttachType: ebpf.AttachLSMMac, + AttachTo: "file_mprotect", + License: "GPL", + }) + }, + }, + ebpf.SkLookup: { + Version: "5.9", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SkLookup, + AttachType: ebpf.AttachSkLookup, + }) + }, + }, + ebpf.Syscall: { + Version: "5.14", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Syscall, + Flags: sys.BPF_F_SLEEPABLE, + }) + }, + }, + ebpf.Netfilter: { + Version: "6.4", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Netfilter, + AttachType: ebpf.AttachNetfilter, + }) + }, + }, +} + +func init() { + for key, ft := range haveProgramTypeMatrix { + ft.Name = key.String() + if ft.Fn == nil { + key := key // avoid the dreaded loop variable problem + ft.Fn = func() error { return probeProgram(&ebpf.ProgramSpec{Type: key}) } + } + } +} + +type helperKey struct { + typ ebpf.ProgramType + helper 
asm.BuiltinFunc +} + +var helperCache = internal.NewFeatureCache(func(key helperKey) *internal.FeatureTest { + return &internal.FeatureTest{ + Name: fmt.Sprintf("%s for program type %s", key.helper, key.typ), + Fn: func() error { + return haveProgramHelper(key.typ, key.helper) + }, + } +}) + +// HaveProgramHelper probes the running kernel for the availability of the specified helper +// function to a specified program type. +// Return values have the following semantics: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that program creation may +// succeed despite an error being returned. +// Only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached and persist throughout any process capability changes. +func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + return helperCache.Result(helperKey{pt, helper}) +} + +func haveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if ok := helperProbeNotImplemented(pt); ok { + return fmt.Errorf("no feature probe for %v/%v", pt, helper) + } + + if err := HaveProgramType(pt); err != nil { + return err + } + + spec := &ebpf.ProgramSpec{ + Type: pt, + Instructions: asm.Instructions{ + helper.Call(), + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "GPL", + } + + switch pt { + case ebpf.CGroupSockAddr: + spec.AttachType = ebpf.AttachCGroupInet4Connect + case ebpf.CGroupSockopt: + spec.AttachType = ebpf.AttachCGroupGetsockopt + case ebpf.SkLookup: + spec.AttachType = ebpf.AttachSkLookup + case ebpf.Syscall: + spec.Flags = sys.BPF_F_SLEEPABLE + case ebpf.Netfilter: + spec.AttachType = ebpf.AttachNetfilter + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogLevel: 1, + }) + if err == nil { + prog.Close() + } + + 
var verr *ebpf.VerifierError + if !errors.As(err, &verr) { + return err + } + + helperTag := fmt.Sprintf("#%d", helper) + + switch { + // EACCES occurs when attempting to create a program probe with a helper + // while the register args when calling this helper aren't set up properly. + // We interpret this as the helper being available, because the verifier + // returns EINVAL if the helper is not supported by the running kernel. + case errors.Is(err, unix.EACCES): + err = nil + + // EINVAL occurs when attempting to create a program with an unknown helper. + case errors.Is(err, unix.EINVAL): + // https://github.com/torvalds/linux/blob/09a0fa92e5b45e99cf435b2fbf5ebcf889cf8780/kernel/bpf/verifier.c#L10663 + if logContainsAll(verr.Log, "invalid func", helperTag) { + return ebpf.ErrNotSupported + } + + // https://github.com/torvalds/linux/blob/09a0fa92e5b45e99cf435b2fbf5ebcf889cf8780/kernel/bpf/verifier.c#L10668 + wrongProgramType := logContainsAll(verr.Log, "program of this type cannot use helper", helperTag) + // https://github.com/torvalds/linux/blob/59b418c7063d30e0a3e1f592d47df096db83185c/kernel/bpf/verifier.c#L10204 + // 4.9 doesn't include # in verifier output. + wrongProgramType = wrongProgramType || logContainsAll(verr.Log, "unknown func") + if wrongProgramType { + return fmt.Errorf("program of this type cannot use helper: %w", ebpf.ErrNotSupported) + } + } + + return err +} + +func logContainsAll(log []string, needles ...string) bool { + first := max(len(log)-5, 0) // Check last 5 lines. 
+ return slices.ContainsFunc(log[first:], func(line string) bool { + for _, needle := range needles { + if !strings.Contains(line, needle) { + return false + } + } + return true + }) +} + +func helperProbeNotImplemented(pt ebpf.ProgramType) bool { + switch pt { + case ebpf.Extension, ebpf.LSM, ebpf.StructOps, ebpf.Tracing: + return true + } + return false +} diff --git a/vendor/github.com/cilium/ebpf/features/version.go b/vendor/github.com/cilium/ebpf/features/version.go new file mode 100644 index 000000000..d54d3ea21 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/version.go @@ -0,0 +1,18 @@ +package features + +import "github.com/cilium/ebpf/internal/linux" + +// LinuxVersionCode returns the version of the currently running kernel +// as defined in the LINUX_VERSION_CODE compile-time macro. It is represented +// in the format described by the KERNEL_VERSION macro from linux/version.h. +// +// Do not use the version to make assumptions about the presence of certain +// kernel features, always prefer feature probes in this package. Some +// distributions backport or disable eBPF features. +func LinuxVersionCode() (uint32, error) { + v, err := linux.KernelVersion() + if err != nil { + return 0, err + } + return v.Kernel(), nil +} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go index 23c819aaa..b52f5f458 100644 --- a/vendor/github.com/cilium/ebpf/info.go +++ b/vendor/github.com/cilium/ebpf/info.go @@ -305,6 +305,7 @@ type ProgramInfo struct { maps []MapID insns []byte + numInsns uint32 jitedSize uint32 verifiedInstructions uint32 @@ -372,6 +373,7 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { jitedSize: info.JitedProgLen, loadTime: time.Duration(info.LoadTime), verifiedInstructions: info.VerifiedInsns, + numInsns: info.XlatedProgLen, } // Supplement OBJ_INFO with data from /proc/self/fdinfo. 
It contains fields @@ -416,9 +418,20 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { if info.XlatedProgLen > 0 { pi.insns = make([]byte, info.XlatedProgLen) - info2.XlatedProgLen = info.XlatedProgLen - info2.XlatedProgInsns = sys.SlicePointer(pi.insns) - makeSecondCall = true + var info3 sys.ProgInfo + info3.XlatedProgLen = info.XlatedProgLen + info3.XlatedProgInsns = sys.SlicePointer(pi.insns) + + // When kernel.kptr_restrict and net.core.bpf_jit_harden are both set, it causes the + // syscall to abort when trying to readback xlated instructions, skipping other info + // as well. So request xlated instructions separately. + if err := sys.ObjInfo(fd, &info3); err != nil { + return nil, err + } + if info3.XlatedProgInsns.IsNil() { + pi.restricted = true + pi.insns = nil + } } if info.NrLineInfo > 0 { @@ -477,19 +490,53 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { if err := sys.ObjInfo(fd, &info2); err != nil { return nil, err } + if info.JitedProgLen > 0 && info2.JitedProgInsns.IsNil() { + // JIT information is not available due to kernel.kptr_restrict + pi.jitedInfo.lineInfos = nil + pi.jitedInfo.ksyms = nil + pi.jitedInfo.insns = nil + pi.jitedInfo.funcLens = nil + } } - if info.XlatedProgLen > 0 && info2.XlatedProgInsns.IsNil() { - pi.restricted = true - pi.insns = nil - pi.lineInfos = nil - pi.funcInfos = nil - pi.jitedInfo = programJitedInfo{} + if len(pi.Name) == len(info.Name)-1 { // Possibly truncated, check BTF info for full name + name, err := readNameFromFunc(&pi) + if err == nil { + pi.Name = name + } // If an error occurs, keep the truncated name, which is better than none } return &pi, nil } +func readNameFromFunc(pi *ProgramInfo) (string, error) { + if pi.numFuncInfos == 0 { + return "", errors.New("no function info") + } + + spec, err := pi.btfSpec() + if err != nil { + return "", err + } + + funcInfos, err := btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + 
) + if err != nil { + return "", err + } + + for _, funcInfo := range funcInfos { + if funcInfo.Offset == 0 { // Information about the whole program + return funcInfo.Func.Name, nil + } + } + return "", errors.New("no function info about program") +} + func readProgramInfoFromProc(fd *sys.FD, pi *ProgramInfo) error { var progType uint32 err := scanFdInfo(fd, map[string]interface{}{ @@ -581,10 +628,6 @@ var ErrRestrictedKernel = internal.ErrRestrictedKernel // ErrNotSupported if the program was created without BTF or if the kernel // doesn't support the field. func (pi *ProgramInfo) LineInfos() (btf.LineOffsets, error) { - if pi.restricted { - return nil, fmt.Errorf("line infos: %w", ErrRestrictedKernel) - } - if len(pi.lineInfos) == 0 { return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) } @@ -665,27 +708,20 @@ func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { return nil, fmt.Errorf("unable to get BTF spec: %w", err) } - lineInfos, err := btf.LoadLineInfos( - bytes.NewReader(pi.lineInfos), - internal.NativeEndian, - pi.numLineInfos, - spec, - ) + lineInfos, err := btf.LoadLineInfos(bytes.NewReader(pi.lineInfos), internal.NativeEndian, pi.numLineInfos, spec) if err != nil { return nil, fmt.Errorf("parse line info: %w", err) } - funcInfos, err := btf.LoadFuncInfos( - bytes.NewReader(pi.funcInfos), - internal.NativeEndian, - pi.numFuncInfos, - spec, - ) + funcInfos, err := btf.LoadFuncInfos(bytes.NewReader(pi.funcInfos), internal.NativeEndian, pi.numFuncInfos, spec) if err != nil { return nil, fmt.Errorf("parse func info: %w", err) } - btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{}) + iter := insns.Iterate() + for iter.Next() { + assignMetadata(iter.Ins, iter.Offset, &funcInfos, &lineInfos, nil) + } } } @@ -708,10 +744,6 @@ func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { // // Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. 
func (pi *ProgramInfo) JitedSize() (uint32, error) { - if pi.restricted { - return 0, fmt.Errorf("jited size: %w", ErrRestrictedKernel) - } - if pi.jitedSize == 0 { return 0, fmt.Errorf("insufficient permissions, unsupported kernel, or JIT compiler disabled: %w", ErrNotSupported) } @@ -721,20 +753,12 @@ func (pi *ProgramInfo) JitedSize() (uint32, error) { // TranslatedSize returns the size of the program's translated instructions in // bytes, after it has been verified and rewritten by the kernel. // -// Returns an error wrapping [ErrRestrictedKernel] if translated instructions -// are restricted by sysctls. -// // Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. func (pi *ProgramInfo) TranslatedSize() (int, error) { - if pi.restricted { - return 0, fmt.Errorf("xlated size: %w", ErrRestrictedKernel) - } - - insns := len(pi.insns) - if insns == 0 { + if pi.numInsns == 0 { return 0, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) } - return insns, nil + return int(pi.numInsns), nil } // MapIDs returns the maps related to the program. @@ -832,10 +856,6 @@ func (pi *ProgramInfo) JitedFuncLens() ([]uint32, bool) { // ErrNotSupported if the program was created without BTF or if the kernel // doesn't support the field. 
func (pi *ProgramInfo) FuncInfos() (btf.FuncOffsets, error) { - if pi.restricted { - return nil, fmt.Errorf("func infos: %w", ErrRestrictedKernel) - } - if len(pi.funcInfos) == 0 { return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) } diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go index f2fffd26b..2efab76b5 100644 --- a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -72,10 +72,30 @@ func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) { return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) } +func (i *UprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *RawTracepointLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) { return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) } +func (i *UprobeLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TracepointLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *EventLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + var _ Info = (*BtfInfo)(nil) func (i *BtfInfo) info() (unsafe.Pointer, uint32) { diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go index 2e6674862..cefb9e098 100644 --- a/vendor/github.com/cilium/ebpf/internal/sys/types.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -59,21 +59,22 @@ const ( BPF_F_INGRESS = 1 BPF_F_INNER_MAP = 4096 BPF_F_INVALIDATE_HASH = 2 + BPF_F_IPV6 = 128 BPF_F_KPROBE_MULTI_RETURN = 1 BPF_F_LINK = 8192 BPF_F_LOCK = 4 BPF_F_MARK_ENFORCE = 64 BPF_F_MARK_MANGLED_0 = 32 
BPF_F_MMAPABLE = 1024 - BPF_F_NEIGH = 2 - BPF_F_NEXTHOP = 8 + BPF_F_NEIGH = 65536 + BPF_F_NEXTHOP = 262144 BPF_F_NO_COMMON_LRU = 2 BPF_F_NO_PREALLOC = 1 BPF_F_NO_TUNNEL_KEY = 16 BPF_F_NO_USER_CONV = 262144 BPF_F_NUMA_NODE = 4 BPF_F_PATH_FD = 16384 - BPF_F_PEER = 4 + BPF_F_PEER = 131072 BPF_F_PRESERVE_ELEMS = 2048 BPF_F_PSEUDO_HDR = 16 BPF_F_RDONLY = 8 @@ -101,6 +102,7 @@ const ( BPF_LOAD_HDR_OPT_TCP_SYN = 1 BPF_LOCAL_STORAGE_GET_F_CREATE = 1 BPF_MAX_LOOPS = 8388608 + BPF_MAX_TIMED_LOOPS = 65535 BPF_MAX_TRAMP_LINKS = 38 BPF_NOEXIST = 1 BPF_RB_AVAIL_DATA = 0 @@ -152,9 +154,15 @@ const ( BPF_SOCK_OPS_TCP_CONNECT_CB = 3 BPF_SOCK_OPS_TCP_LISTEN_CB = 11 BPF_SOCK_OPS_TIMEOUT_INIT = 1 + BPF_SOCK_OPS_TSTAMP_ACK_CB = 19 + BPF_SOCK_OPS_TSTAMP_SCHED_CB = 16 + BPF_SOCK_OPS_TSTAMP_SENDMSG_CB = 20 + BPF_SOCK_OPS_TSTAMP_SND_HW_CB = 18 + BPF_SOCK_OPS_TSTAMP_SND_SW_CB = 17 BPF_SOCK_OPS_VOID = 0 BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64 + BPF_STREAM_MAX_CAPACITY = 100000 BPF_TASK_ITER_ALL_PROCS = 0 BPF_TASK_ITER_ALL_THREADS = 1 BPF_TASK_ITER_PROC_THREADS = 2 @@ -244,7 +252,8 @@ const ( BPF_NETKIT_PRIMARY AttachType = 54 BPF_NETKIT_PEER AttachType = 55 BPF_TRACE_KPROBE_SESSION AttachType = 56 - __MAX_BPF_ATTACH_TYPE AttachType = 57 + BPF_TRACE_UPROBE_SESSION AttachType = 57 + __MAX_BPF_ATTACH_TYPE AttachType = 58 ) type Cmd uint32 @@ -288,7 +297,8 @@ const ( BPF_LINK_DETACH Cmd = 34 BPF_PROG_BIND_MAP Cmd = 35 BPF_TOKEN_CREATE Cmd = 36 - __MAX_BPF_CMD Cmd = 37 + BPF_PROG_STREAM_READ_BY_FD Cmd = 37 + __MAX_BPF_CMD Cmd = 38 ) type FunctionId uint32 @@ -579,6 +589,18 @@ const ( __MAX_BPF_MAP_TYPE MapType = 34 ) +type NetfilterInetHook uint32 + +const ( + NF_INET_PRE_ROUTING NetfilterInetHook = 0 + NF_INET_LOCAL_IN NetfilterInetHook = 1 + NF_INET_FORWARD NetfilterInetHook = 2 + NF_INET_LOCAL_OUT NetfilterInetHook = 3 + NF_INET_POST_ROUTING NetfilterInetHook = 4 + NF_INET_NUMHOOKS NetfilterInetHook = 5 + NF_INET_INGRESS NetfilterInetHook = 5 +) + type 
ObjType uint32 const ( @@ -689,6 +711,19 @@ const ( XDP_REDIRECT XdpAction = 4 ) +type NetfilterProtocolFamily uint32 + +const ( + NFPROTO_UNSPEC NetfilterProtocolFamily = 0 + NFPROTO_INET NetfilterProtocolFamily = 1 + NFPROTO_IPV4 NetfilterProtocolFamily = 2 + NFPROTO_ARP NetfilterProtocolFamily = 3 + NFPROTO_NETDEV NetfilterProtocolFamily = 5 + NFPROTO_BRIDGE NetfilterProtocolFamily = 7 + NFPROTO_IPV6 NetfilterProtocolFamily = 10 + NFPROTO_NUMPROTO NetfilterProtocolFamily = 11 +) + type BtfInfo struct { _ structs.HostLayout Btf TypedPointer[uint8] @@ -740,6 +775,9 @@ type MapInfo struct { BtfValueTypeId TypeID BtfVmlinuxId uint32 MapExtra uint64 + Hash uint64 + HashSize uint32 + _ [4]byte } type ProgInfo struct { @@ -947,8 +985,8 @@ type LinkCreateNetfilterAttr struct { TargetFd uint32 AttachType AttachType Flags uint32 - Pf uint32 - Hooknum uint32 + Pf NetfilterProtocolFamily + Hooknum NetfilterInetHook Priority int32 NetfilterFlags uint32 _ [32]byte @@ -1129,6 +1167,9 @@ type MapCreateAttr struct { MapExtra uint64 ValueTypeBtfObjFd int32 MapTokenFd int32 + ExclProgHash uint64 + ExclProgHashSize uint32 + _ [4]byte } func MapCreate(attr *MapCreateAttr) (*FD, error) { @@ -1450,7 +1491,10 @@ type ProgLoadAttr struct { CoreReloRecSize uint32 LogTrueSize uint32 ProgTokenFd int32 - _ [4]byte + FdArrayCnt uint32 + Signature uint64 + SignatureSize uint32 + KeyringId int32 } func ProgLoad(attr *ProgLoadAttr) (*FD, error) { @@ -1533,6 +1577,21 @@ type CgroupLinkInfo struct { _ [36]byte } +type EventLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + Config uint64 + EventType uint32 + _ [4]byte + Cookie uint64 + _ [16]byte +} + type IterLinkInfo struct { _ structs.HostLayout Type LinkType @@ -1590,8 +1649,8 @@ type NetfilterLinkInfo struct { Id LinkID ProgId uint32 _ [4]byte - Pf uint32 - Hooknum uint32 + Pf NetfilterProtocolFamily + Hooknum NetfilterInetHook Priority int32 Flags 
uint32 _ [32]byte @@ -1625,7 +1684,9 @@ type RawTracepointLinkInfo struct { _ [4]byte TpName TypedPointer[uint8] TpNameLen uint32 - _ [36]byte + _ [4]byte + Cookie uint64 + _ [24]byte } type TcxLinkInfo struct { @@ -1639,6 +1700,21 @@ type TcxLinkInfo struct { _ [40]byte } +type TracepointLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + TpName TypedPointer[uint8] + NameLen uint32 + _ [4]byte + Cookie uint64 + _ [16]byte +} + type TracingLinkInfo struct { _ structs.HostLayout Type LinkType @@ -1648,7 +1724,41 @@ type TracingLinkInfo struct { AttachType AttachType TargetObjId uint32 TargetBtfId TypeID - _ [36]byte + _ [4]byte + Cookie uint64 + _ [24]byte +} + +type UprobeLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + FileName TypedPointer[uint8] + NameLen uint32 + Offset uint32 + Cookie uint64 + RefCtrOffset uint64 + _ [8]byte +} + +type UprobeMultiLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Path TypedPointer[uint8] + Offsets TypedPointer[uint64] + RefCtrOffsets TypedPointer[uint64] + Cookies TypedPointer[uint64] + PathSize uint32 + Count uint32 + Flags uint32 + Pid uint32 } type XDPLinkInfo struct { diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 14a0a1929..039816105 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -10,69 +10,72 @@ import ( ) const ( - BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC - BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE - BPF_F_RDONLY = linux.BPF_F_RDONLY - BPF_F_WRONLY = linux.BPF_F_WRONLY - BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG - BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG - BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE - 
BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS - BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE - BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP - BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN - BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN - BPF_F_LOCK = linux.BPF_F_LOCK - BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN - BPF_TAG_SIZE = linux.BPF_TAG_SIZE - BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT - BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT - BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ - SYS_BPF = linux.SYS_BPF - F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC - EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD - EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC - O_CLOEXEC = linux.O_CLOEXEC - O_NONBLOCK = linux.O_NONBLOCK - PROT_NONE = linux.PROT_NONE - PROT_READ = linux.PROT_READ - PROT_WRITE = linux.PROT_WRITE - MAP_ANON = linux.MAP_ANON - MAP_SHARED = linux.MAP_SHARED - MAP_FIXED = linux.MAP_FIXED - MAP_PRIVATE = linux.MAP_PRIVATE - PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 - PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE - PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT - PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT - PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE - PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE - PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF - PerfBitWatermark = linux.PerfBitWatermark - PerfBitWriteBackward = linux.PerfBitWriteBackward - PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW - PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC - RLIM_INFINITY = linux.RLIM_INFINITY - RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK - BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME - PERF_RECORD_LOST = linux.PERF_RECORD_LOST - PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE - AT_FDCWD = linux.AT_FDCWD - RENAME_NOREPLACE = linux.RENAME_NOREPLACE - SO_ATTACH_BPF = linux.SO_ATTACH_BPF - SO_DETACH_BPF = linux.SO_DETACH_BPF - SOL_SOCKET = linux.SOL_SOCKET - SIGPROF = linux.SIGPROF - SIGUSR1 = linux.SIGUSR1 - SIG_BLOCK = linux.SIG_BLOCK - 
SIG_UNBLOCK = linux.SIG_UNBLOCK - BPF_FS_MAGIC = linux.BPF_FS_MAGIC - TRACEFS_MAGIC = linux.TRACEFS_MAGIC - DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC - BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP - BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP - AF_UNSPEC = linux.AF_UNSPEC - IFF_UP = linux.IFF_UP + BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE + BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN + BPF_F_LOCK = linux.BPF_F_LOCK + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF + F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_RDONLY = linux.O_RDONLY + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_NONE = linux.PROT_NONE + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_ANON = linux.MAP_ANON + MAP_SHARED = linux.MAP_SHARED + MAP_FIXED = linux.MAP_FIXED + MAP_PRIVATE = linux.MAP_PRIVATE + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + 
PerfBitWriteBackward = linux.PerfBitWriteBackward + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET + SIGPROF = linux.SIGPROF + SIGUSR1 = linux.SIGUSR1 + SIG_BLOCK = linux.SIG_BLOCK + SIG_UNBLOCK = linux.SIG_UNBLOCK + BPF_FS_MAGIC = linux.BPF_FS_MAGIC + TRACEFS_MAGIC = linux.TRACEFS_MAGIC + DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP + AF_UNSPEC = linux.AF_UNSPEC + IFF_UP = linux.IFF_UP + CLONE_NEWNET = linux.CLONE_NEWNET + LINUX_CAPABILITY_VERSION_3 = linux.LINUX_CAPABILITY_VERSION_3 ) type Statfs_t = linux.Statfs_t @@ -85,6 +88,8 @@ type EpollEvent = linux.EpollEvent type PerfEventAttr = linux.PerfEventAttr type Utsname = linux.Utsname type CPUSet = linux.CPUSet +type CapUserData = linux.CapUserData +type CapUserHeader = linux.CapUserHeader func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { return linux.Syscall(trap, a1, a2, a3) @@ -210,3 +215,19 @@ func SchedGetaffinity(pid int, set *CPUSet) error { func Auxv() ([][2]uintptr, error) { return linux.Auxv() } + +func Unshare(flag int) error { + return linux.Unshare(flag) +} + +func Setns(fd int, nstype int) error { + return linux.Setns(fd, nstype) +} + +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + return linux.Capget(hdr, data) +} + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + return linux.Capset(hdr, data) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go 
b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index f3f764ebe..7ab8f9294 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -73,6 +73,7 @@ const ( BPF_F_LOCK AF_UNSPEC IFF_UP + LINUX_CAPABILITY_VERSION_3 ) type Statfs_t struct { @@ -115,6 +116,17 @@ type Sigset_t struct { Val [4]uint64 } +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { return 0, 0, ENOTSUP } @@ -288,3 +300,11 @@ func SchedGetaffinity(pid int, set *CPUSet) error { func Auxv() ([][2]uintptr, error) { return nil, errNonLinux() } + +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + return errNonLinux() +} + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + return errNonLinux() +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go index 0912e0a08..8b27677f9 100644 --- a/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -33,8 +33,9 @@ type KprobeOptions struct { // Increase the maximum number of concurrent invocations of a kretprobe. // Required when tracing some long running functions in the kernel. // - // Deprecated: this setting forces the use of an outdated kernel API and is not portable - // across kernel versions. + // Warning: this setting forces the use of an outdated kernel API and is + // not portable across kernel versions. On supported kernels, consider using + // fexit programs instead, as they don't have this MaxActive limitation. RetprobeMaxActive int // Prefix used for the event name if the kprobe must be attached using tracefs. // The group name will be formatted as `_`. 
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go index 3a2b06a24..b72ea9587 100644 --- a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go +++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go @@ -8,8 +8,7 @@ import ( "os" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/features" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -126,11 +125,11 @@ func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Lin } if opts.Session { - if haveFeatErr := haveBPFLinkKprobeSession(); haveFeatErr != nil { + if haveFeatErr := features.HaveBPFLinkKprobeSession(); haveFeatErr != nil { return nil, haveFeatErr } } else { - if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil { + if haveFeatErr := features.HaveBPFLinkKprobeMulti(); haveFeatErr != nil { return nil, haveFeatErr } } @@ -157,12 +156,30 @@ func (kml *kprobeMultiLink) Update(_ *ebpf.Program) error { func (kml *kprobeMultiLink) Info() (*Info, error) { var info sys.KprobeMultiLinkInfo if err := sys.ObjInfo(kml.fd, &info); err != nil { - return nil, fmt.Errorf("kprobe multi link info: %s", err) + return nil, fmt.Errorf("kprobe multi link info: %w", err) + } + var addrs = make([]uint64, info.Count) + var cookies = make([]uint64, info.Count) + info = sys.KprobeMultiLinkInfo{ + Addrs: sys.SlicePointer(addrs), + Cookies: sys.SlicePointer(cookies), + Count: uint32(len(addrs)), + } + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("kprobe multi link info: %w", err) + } + if info.Addrs.IsNil() { + addrs = nil + } + if info.Cookies.IsNil() { + cookies = nil } extra := &KprobeMultiInfo{ - count: info.Count, - flags: info.Flags, - missed: info.Missed, + Count: info.Count, + Flags: info.Flags, + Missed: info.Missed, + addrs: addrs, + cookies: cookies, } return &Info{ @@ -172,85 +189,3 @@ func (kml 
*kprobeMultiLink) Info() (*Info, error) { extra, }, nil } - -var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Name: "probe_kpm_link", - Type: ebpf.Kprobe, - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - AttachType: ebpf.AttachTraceKprobeMulti, - License: "MIT", - }) - if errors.Is(err, unix.E2BIG) { - // Kernel doesn't support AttachType field. - return internal.ErrNotSupported - } - if err != nil { - return err - } - defer prog.Close() - - fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ - ProgFd: uint32(prog.FD()), - AttachType: sys.BPF_TRACE_KPROBE_MULTI, - Count: 1, - Syms: sys.NewStringSlicePointer([]string{"vprintk"}), - }) - switch { - case errors.Is(err, unix.EINVAL): - return internal.ErrNotSupported - // If CONFIG_FPROBE isn't set. - case errors.Is(err, unix.EOPNOTSUPP): - return internal.ErrNotSupported - case err != nil: - return err - } - - fd.Close() - - return nil -}, "5.18") - -var haveBPFLinkKprobeSession = internal.NewFeatureTest("bpf_link_kprobe_session", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Name: "probe_kps_link", - Type: ebpf.Kprobe, - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - AttachType: ebpf.AttachTraceKprobeSession, - License: "MIT", - }) - if errors.Is(err, unix.E2BIG) { - // Kernel doesn't support AttachType field. - return internal.ErrNotSupported - } - if err != nil { - return err - } - defer prog.Close() - - fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ - ProgFd: uint32(prog.FD()), - AttachType: sys.BPF_TRACE_KPROBE_SESSION, - Count: 1, - Syms: sys.NewStringSlicePointer([]string{"vprintk"}), - }) - switch { - case errors.Is(err, unix.EINVAL): - return internal.ErrNotSupported - // If CONFIG_FPROBE isn't set. 
- case errors.Is(err, unix.EOPNOTSUPP): - return internal.ErrNotSupported - case err != nil: - return err - } - - fd.Close() - - return nil -}, "6.10") diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go index aaff08a15..32be58982 100644 --- a/vendor/github.com/cilium/ebpf/link/link.go +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -55,13 +55,6 @@ type Link interface { isLink() } -// NewLinkFromFD creates a link from a raw fd. -// -// Deprecated: use [NewFromFD] instead. -func NewLinkFromFD(fd int) (Link, error) { - return NewFromFD(fd) -} - // NewFromFD creates a link from a raw fd. // // You should not use fd after calling this function. diff --git a/vendor/github.com/cilium/ebpf/link/link_other.go b/vendor/github.com/cilium/ebpf/link/link_other.go index cd9452fd8..2071b819c 100644 --- a/vendor/github.com/cilium/ebpf/link/link_other.go +++ b/vendor/github.com/cilium/ebpf/link/link_other.go @@ -25,6 +25,7 @@ const ( UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI NetfilterType = sys.BPF_LINK_TYPE_NETFILTER NetkitType = sys.BPF_LINK_TYPE_NETKIT + StructOpsType = sys.BPF_LINK_TYPE_STRUCT_OPS ) // AttachRawLink creates a raw link. 
@@ -102,15 +103,17 @@ func wrapRawLink(raw *RawLink) (_ Link, err error) { return &netkitLink{*raw}, nil case XDPType: return &xdpLink{*raw}, nil + case StructOpsType: + return &structOpsLink{*raw}, nil default: return raw, nil } } type TracingInfo struct { - AttachType sys.AttachType - TargetObjId uint32 - TargetBtfId sys.TypeID + AttachType sys.AttachType + TargetObjectId uint32 + TargetBtfId sys.TypeID } type CgroupInfo struct { @@ -120,7 +123,7 @@ type CgroupInfo struct { } type NetNsInfo struct { - NetnsIno uint32 + NetnsInode uint32 AttachType sys.AttachType } @@ -134,10 +137,10 @@ type XDPInfo struct { } type NetfilterInfo struct { - Pf uint32 - Hooknum uint32 - Priority int32 - Flags uint32 + ProtocolFamily NetfilterProtocolFamily + Hook NetfilterInetHook + Priority int32 + Flags uint32 } type NetkitInfo struct { @@ -145,25 +148,95 @@ type NetkitInfo struct { AttachType sys.AttachType } +type RawTracepointInfo struct { + Name string +} + type KprobeMultiInfo struct { - count uint32 - flags uint32 - missed uint64 + // Count is the number of addresses hooked by the kprobe. + Count uint32 + Flags uint32 + Missed uint64 + addrs []uint64 + cookies []uint64 +} + +type KprobeMultiAddress struct { + Address uint64 + Cookie uint64 +} + +// Addresses are the addresses hooked by the kprobe. +func (kpm *KprobeMultiInfo) Addresses() ([]KprobeMultiAddress, bool) { + if kpm.addrs == nil || len(kpm.addrs) != len(kpm.cookies) { + return nil, false + } + addrs := make([]KprobeMultiAddress, len(kpm.addrs)) + for i := range kpm.addrs { + addrs[i] = KprobeMultiAddress{ + Address: kpm.addrs[i], + Cookie: kpm.cookies[i], + } + } + return addrs, true +} + +type UprobeMultiInfo struct { + Count uint32 + Flags uint32 + Missed uint64 + offsets []uint64 + cookies []uint64 + refCtrOffsets []uint64 + // File is the path that the file the uprobe was attached to + // had at creation time. 
+ // + // However, due to various circumstances (differing mount namespaces, + // file replacement, ...), this path may not point to the same binary + // the uprobe was originally attached to. + File string + pid uint32 } -// AddressCount is the number of addresses hooked by the kprobe. -func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { - return kpm.count, kpm.count > 0 +type UprobeMultiOffset struct { + Offset uint64 + Cookie uint64 + ReferenceCount uint64 } -func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { - return kpm.flags, kpm.count > 0 +// Offsets returns the offsets that the uprobe was attached to along with the related cookies and ref counters. +func (umi *UprobeMultiInfo) Offsets() ([]UprobeMultiOffset, bool) { + if umi.offsets == nil || len(umi.cookies) != len(umi.offsets) || len(umi.refCtrOffsets) != len(umi.offsets) { + return nil, false + } + var adresses = make([]UprobeMultiOffset, len(umi.offsets)) + for i := range umi.offsets { + adresses[i] = UprobeMultiOffset{ + Offset: umi.offsets[i], + Cookie: umi.cookies[i], + ReferenceCount: umi.refCtrOffsets[i], + } + } + return adresses, true } -func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { - return kpm.missed, kpm.count > 0 +// Pid returns the process ID that this uprobe is attached to. +// +// If it does not exist, the uprobe will trigger for all processes. 
+func (umi *UprobeMultiInfo) Pid() (uint32, bool) { + return umi.pid, umi.pid > 0 } +const ( + PerfEventUnspecified = sys.BPF_PERF_EVENT_UNSPEC + PerfEventUprobe = sys.BPF_PERF_EVENT_UPROBE + PerfEventUretprobe = sys.BPF_PERF_EVENT_URETPROBE + PerfEventKprobe = sys.BPF_PERF_EVENT_KPROBE + PerfEventKretprobe = sys.BPF_PERF_EVENT_KRETPROBE + PerfEventTracepoint = sys.BPF_PERF_EVENT_TRACEPOINT + PerfEventEvent = sys.BPF_PERF_EVENT_EVENT +) + type PerfEventInfo struct { Type sys.PerfEventType extra interface{} @@ -174,17 +247,50 @@ func (r *PerfEventInfo) Kprobe() *KprobeInfo { return e } +func (r *PerfEventInfo) Uprobe() *UprobeInfo { + e, _ := r.extra.(*UprobeInfo) + return e +} + +func (r *PerfEventInfo) Tracepoint() *TracepointInfo { + e, _ := r.extra.(*TracepointInfo) + return e +} + +func (r *PerfEventInfo) Event() *EventInfo { + e, _ := r.extra.(*EventInfo) + return e +} + type KprobeInfo struct { - address uint64 - missed uint64 + Address uint64 + Missed uint64 + Function string + Offset uint32 } -func (kp *KprobeInfo) Address() (uint64, bool) { - return kp.address, kp.address > 0 +type UprobeInfo struct { + // File is the path that the file the uprobe was attached to + // had at creation time. + // + // However, due to various circumstances (differing mount namespaces, + // file replacement, ...), this path may not point to the same binary + // the uprobe was originally attached to. + File string + Offset uint32 + Cookie uint64 + OffsetReferenceCount uint64 } -func (kp *KprobeInfo) Missed() (uint64, bool) { - return kp.missed, kp.address > 0 +type TracepointInfo struct { + Tracepoint string + Cookie uint64 +} + +type EventInfo struct { + Config uint64 + Type uint32 + Cookie uint64 } // Tracing returns tracing type-specific link info. @@ -251,6 +357,14 @@ func (r Info) KprobeMulti() *KprobeMultiInfo { return e } +// UprobeMulti returns uprobe-multi type-specific link info. +// +// Returns nil if the type-specific link info isn't available. 
+func (r Info) UprobeMulti() *UprobeMultiInfo { + e, _ := r.extra.(*UprobeMultiInfo) + return e +} + // PerfEvent returns perf-event type-specific link info. // // Returns nil if the type-specific link info isn't available. @@ -258,3 +372,11 @@ func (r Info) PerfEvent() *PerfEventInfo { e, _ := r.extra.(*PerfEventInfo) return e } + +// RawTracepoint returns raw-tracepoint type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) RawTracepoint() *RawTracepointInfo { + e, _ := r.extra.(*RawTracepointInfo) + return e +} diff --git a/vendor/github.com/cilium/ebpf/link/netfilter.go b/vendor/github.com/cilium/ebpf/link/netfilter.go index 90e914c51..6b5177794 100644 --- a/vendor/github.com/cilium/ebpf/link/netfilter.go +++ b/vendor/github.com/cilium/ebpf/link/netfilter.go @@ -13,13 +13,35 @@ const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmenta type NetfilterAttachFlags uint32 +type NetfilterInetHook = sys.NetfilterInetHook + +const ( + NetfilterInetPreRouting = sys.NF_INET_PRE_ROUTING + NetfilterInetLocalIn = sys.NF_INET_LOCAL_IN + NetfilterInetForward = sys.NF_INET_FORWARD + NetfilterInetLocalOut = sys.NF_INET_LOCAL_OUT + NetfilterInetPostRouting = sys.NF_INET_POST_ROUTING +) + +type NetfilterProtocolFamily = sys.NetfilterProtocolFamily + +const ( + NetfilterProtoUnspec = sys.NFPROTO_UNSPEC + NetfilterProtoInet = sys.NFPROTO_INET // Inet applies to both IPv4 and IPv6 + NetfilterProtoIPv4 = sys.NFPROTO_IPV4 + NetfilterProtoARP = sys.NFPROTO_ARP + NetfilterProtoNetdev = sys.NFPROTO_NETDEV + NetfilterProtoBridge = sys.NFPROTO_BRIDGE + NetfilterProtoIPv6 = sys.NFPROTO_IPV6 +) + type NetfilterOptions struct { // Program must be a netfilter BPF program. Program *ebpf.Program // The protocol family. - ProtocolFamily uint32 - // The number of the hook you are interested in. - HookNumber uint32 + ProtocolFamily NetfilterProtocolFamily + // The netfilter hook to attach to. 
+ Hook NetfilterInetHook // Priority within hook Priority int32 // Extra link flags @@ -51,8 +73,8 @@ func AttachNetfilter(opts NetfilterOptions) (Link, error) { ProgFd: uint32(opts.Program.FD()), AttachType: sys.BPF_NETFILTER, Flags: opts.Flags, - Pf: uint32(opts.ProtocolFamily), - Hooknum: uint32(opts.HookNumber), + Pf: opts.ProtocolFamily, + Hooknum: opts.Hook, Priority: opts.Priority, NetfilterFlags: uint32(opts.NetfilterFlags), } @@ -75,10 +97,10 @@ func (nf *netfilterLink) Info() (*Info, error) { return nil, fmt.Errorf("netfilter link info: %s", err) } extra := &NetfilterInfo{ - Pf: info.Pf, - Hooknum: info.Hooknum, - Priority: info.Priority, - Flags: info.Flags, + ProtocolFamily: info.Pf, + Hook: info.Hooknum, + Priority: info.Priority, + Flags: info.Flags, } return &Info{ diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go index a9f7ee79c..fd7d6c6c2 100644 --- a/vendor/github.com/cilium/ebpf/link/netns.go +++ b/vendor/github.com/cilium/ebpf/link/netns.go @@ -44,7 +44,7 @@ func (ns *NetNsLink) Info() (*Info, error) { return nil, fmt.Errorf("netns link info: %s", err) } extra := &NetNsInfo{ - NetnsIno: info.NetnsIno, + NetnsInode: info.NetnsIno, AttachType: info.AttachType, } diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go index 22c78ed92..a1455cabc 100644 --- a/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -132,6 +132,32 @@ func (pl *perfEventLink) PerfEvent() (*os.File, error) { return fd.File("perf-event") } +// queryInfoWithString queries object info that contains a string field. +// +// The passed stringField and stringLengthField must point to the string field +// and its length field inside the info struct respectively. +// +// It returns the queried string and fills in the passed info struct. 
+func queryInfoWithString(fd *sys.FD, info sys.Info, stringField *sys.TypedPointer[byte], stringLengthField *uint32) (string, error) { + // Query info to get the length + if err := sys.ObjInfo(fd, info); err != nil { + return "", err + } + + // The stringLengthField pointer points to a field inside info, so it is now populated. + var stringData = make([]byte, *stringLengthField) + *stringField = sys.SlicePointer(stringData) + + // Query info again to fill in the string. + // Since the stringField pointer points to a field inside info, + // the info now contains the pointer to our allocated stringData. + if err := sys.ObjInfo(fd, info); err != nil { + return "", fmt.Errorf("object info with string: %s", err) + } + + return unix.ByteSliceToString(stringData), nil +} + func (pl *perfEventLink) Info() (*Info, error) { var info sys.PerfEventLinkInfo if err := sys.ObjInfo(pl.fd, &info); err != nil { @@ -140,14 +166,50 @@ func (pl *perfEventLink) Info() (*Info, error) { var extra2 interface{} switch info.PerfEventType { - case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE: + case PerfEventKprobe, PerfEventKretprobe: var kprobeInfo sys.KprobeLinkInfo - if err := sys.ObjInfo(pl.fd, &kprobeInfo); err != nil { + funcName, err := queryInfoWithString(pl.fd, &kprobeInfo, &kprobeInfo.FuncName, &kprobeInfo.NameLen) + if err != nil { return nil, fmt.Errorf("kprobe link info: %s", err) } extra2 = &KprobeInfo{ - address: kprobeInfo.Addr, - missed: kprobeInfo.Missed, + Address: kprobeInfo.Addr, + Missed: kprobeInfo.Missed, + Function: funcName, + Offset: kprobeInfo.Offset, + } + case PerfEventUprobe, PerfEventUretprobe: + var uprobeInfo sys.UprobeLinkInfo + fileName, err := queryInfoWithString(pl.fd, &uprobeInfo, &uprobeInfo.FileName, &uprobeInfo.NameLen) + if err != nil { + return nil, fmt.Errorf("uprobe link info: %s", err) + } + extra2 = &UprobeInfo{ + Offset: uprobeInfo.Offset, + Cookie: uprobeInfo.Cookie, + OffsetReferenceCount: uprobeInfo.RefCtrOffset, + File: 
fileName, + } + case PerfEventTracepoint: + var tracepointInfo sys.TracepointLinkInfo + tpName, err := queryInfoWithString(pl.fd, &tracepointInfo, &tracepointInfo.TpName, &tracepointInfo.NameLen) + if err != nil { + return nil, fmt.Errorf("perf event link info: %w", err) + } + extra2 = &TracepointInfo{ + Tracepoint: tpName, + Cookie: tracepointInfo.Cookie, + } + case PerfEventEvent: + var eventInfo sys.EventLinkInfo + err := sys.ObjInfo(pl.fd, &eventInfo) + if err != nil { + return nil, fmt.Errorf("trace point link info: %s", err) + } + extra2 = &EventInfo{ + Config: eventInfo.Config, + Type: eventInfo.EventType, + Cookie: eventInfo.Cookie, } } diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go index eeca82811..c722ab27b 100644 --- a/vendor/github.com/cilium/ebpf/link/query.go +++ b/vendor/github.com/cilium/ebpf/link/query.go @@ -4,6 +4,7 @@ package link import ( "fmt" + "slices" "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal/sys" @@ -33,7 +34,12 @@ type QueryResult struct { // HaveLinkInfo returns true if the kernel supports querying link information // for a particular [ebpf.AttachType]. func (qr *QueryResult) HaveLinkInfo() bool { - return qr.Revision > 0 + return slices.ContainsFunc(qr.Programs, + func(ap AttachedProgram) bool { + _, ok := ap.LinkID() + return ok + }, + ) } type AttachedProgram struct { @@ -44,8 +50,7 @@ type AttachedProgram struct { // LinkID returns the ID associated with the program. // // Returns 0, false if the kernel doesn't support retrieving the ID or if the -// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you -// need to tell the two apart. +// program wasn't attached via a link. 
func (ap *AttachedProgram) LinkID() (ID, bool) { return ap.linkID, ap.linkID != 0 } diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go index 60e667a0c..5e974e3de 100644 --- a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -91,3 +91,19 @@ var _ Link = (*rawTracepoint)(nil) func (rt *rawTracepoint) Update(_ *ebpf.Program) error { return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) } + +func (rt *rawTracepoint) Info() (*Info, error) { + var info sys.RawTracepointLinkInfo + name, err := queryInfoWithString(rt.fd, &info, &info.TpName, &info.TpNameLen) + if err != nil { + return nil, err + } + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + &RawTracepointInfo{ + Name: name, + }, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/struct_ops.go b/vendor/github.com/cilium/ebpf/link/struct_ops.go new file mode 100644 index 000000000..d1b7f2517 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/struct_ops.go @@ -0,0 +1,51 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type structOpsLink struct { + RawLink +} + +func (*structOpsLink) Update(*ebpf.Program) error { + return fmt.Errorf("update struct_ops link: %w", ErrNotSupported) +} + +type StructOpsOptions struct { + Map *ebpf.Map +} + +// AttachStructOps attaches a struct_ops map (created from a ".struct_ops.link" +// section) to its kernel subsystem via a BPF link. 
+func AttachStructOps(opts StructOpsOptions) (Link, error) { + m := opts.Map + + if m == nil { + return nil, fmt.Errorf("map cannot be nil") + } + + if t := m.Type(); t != ebpf.StructOpsMap { + return nil, fmt.Errorf("can't attach non-struct_ops map") + } + + mapFD := m.FD() + if mapFD <= 0 { + return nil, fmt.Errorf("invalid map: %s", sys.ErrClosedFd) + } + + fd, err := sys.LinkCreate(&sys.LinkCreateAttr{ + // For struct_ops links, the mapFD must be passed as ProgFd. + ProgFd: uint32(mapFD), + AttachType: sys.AttachType(ebpf.AttachStructOps), + TargetFd: 0, + }) + if err != nil { + return nil, fmt.Errorf("attach StructOps: create link: %w", err) + } + + return &structOpsLink{RawLink{fd: fd}}, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go index b33b3dc0e..d0e595e6f 100644 --- a/vendor/github.com/cilium/ebpf/link/tracing.go +++ b/vendor/github.com/cilium/ebpf/link/tracing.go @@ -26,9 +26,9 @@ func (f *tracing) Info() (*Info, error) { return nil, fmt.Errorf("tracing link info: %s", err) } extra := &TracingInfo{ - TargetObjId: info.TargetObjId, - TargetBtfId: info.TargetBtfId, - AttachType: info.AttachType, + TargetObjectId: info.TargetObjId, + TargetBtfId: info.TargetBtfId, + AttachType: info.AttachType, } return &Info{ diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go index d20997e9d..d53975d53 100644 --- a/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -6,6 +6,7 @@ import ( "debug/elf" "errors" "fmt" + "io/fs" "os" "sync" @@ -34,14 +35,28 @@ var ( ErrNoSymbol = errors.New("not found") ) +const permExec fs.FileMode = 0111 + // Executable defines an executable program on the filesystem. type Executable struct { // Path of the executable on the filesystem. path string - // Parsed ELF and dynamic symbols' cachedAddresses. 
- cachedAddresses map[string]uint64 - // Keep track of symbol table lazy load. - cacheAddressesOnce sync.Once + // Cached symbol values for all ELF and dynamic symbols. + // Before using this, lazyLoadSymbols() must be called. + cachedSymbols map[string]symbol + // cachedSymbolsOnce tracks the lazy load of cachedSymbols. + cachedSymbolsOnce sync.Once +} + +type symbol struct { + addr uint64 + size uint64 +} + +// contains returns true if the given address falls within the range +// covered by the symbol. +func (s symbol) contains(address uint64) bool { + return s.addr <= address && address < s.addr+s.size } // UprobeOptions defines additional parameters that will be used @@ -98,20 +113,18 @@ func OpenExecutable(path string) (*Executable, error) { return nil, fmt.Errorf("path cannot be empty") } - f, err := internal.OpenSafeELFFile(path) + info, err := os.Stat(path) if err != nil { - return nil, fmt.Errorf("parse ELF file: %w", err) + return nil, fmt.Errorf("stat executable: %w", err) } - defer f.Close() - if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN { - // ELF is not an executable or a shared object. - return nil, errors.New("the given file is not an executable or a shared object") + if info.Mode()&permExec == 0 { + return nil, fmt.Errorf("file %s is not executable", path) } return &Executable{ - path: path, - cachedAddresses: make(map[string]uint64), + path: path, + cachedSymbols: make(map[string]symbol), }, nil } @@ -155,22 +168,18 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { } } - ex.cachedAddresses[s.Name] = address + ex.cachedSymbols[s.Name] = symbol{ + addr: address, + size: s.Size, + } } return nil } -// address calculates the address of a symbol in the executable. -// -// opts must not be nil. 
-func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) { - if address > 0 { - return address + offset, nil - } - +func (ex *Executable) lazyLoadSymbols() error { var err error - ex.cacheAddressesOnce.Do(func() { + ex.cachedSymbolsOnce.Do(func() { var f *internal.SafeELFFile f, err = internal.OpenSafeELFFile(ex.path) if err != nil { @@ -179,13 +188,30 @@ func (ex *Executable) address(symbol string, address, offset uint64) (uint64, er } defer f.Close() + if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN { + // ELF is not an executable or a shared object. + err = errors.New("the given file is not an executable or a shared object") + return + } err = ex.load(f) }) + return err +} + +// address calculates the address of a symbol in the executable. +// +// opts must not be nil. +func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) { + if address > 0 { + return address + offset, nil + } + + err := ex.lazyLoadSymbols() if err != nil { return 0, fmt.Errorf("lazy load symbols: %w", err) } - address, ok := ex.cachedAddresses[symbol] + sym, ok := ex.cachedSymbols[symbol] if !ok { return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) } @@ -196,12 +222,39 @@ func (ex *Executable) address(symbol string, address, offset uint64) (uint64, er // // Since only offset values are stored and not elf.Symbol, if the value is 0, // assume it's an external symbol. - if address == 0 { + if sym.addr == 0 { return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) } - return address + offset, nil + if offset >= sym.size { + return 0, fmt.Errorf("offset %d is out of range of symbol %s", offset, symbol) + } + + return sym.addr + offset, nil +} + +// SymbolOffset represents an offset within a symbol within a binary. 
+type SymbolOffset struct { + Symbol string + Offset uint64 +} + +// Symbol returns the SymbolOffset that the given address points to. +// This includes the symbol name and the offset within that symbol. +// +// If no symbol is found for the given address, ErrNoSymbol is returned. +func (ex *Executable) Symbol(address uint64) (SymbolOffset, error) { + if err := ex.lazyLoadSymbols(); err != nil { + return SymbolOffset{}, fmt.Errorf("lazy load symbols: %w", err) + } + + for name, symbol := range ex.cachedSymbols { + if symbol.contains(address) { + return SymbolOffset{name, address - symbol.addr}, nil + } + } + return SymbolOffset{}, ErrNoSymbol } // Uprobe attaches the given eBPF program to a perf event that fires when the diff --git a/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go index e34ad7168..f017405aa 100644 --- a/vendor/github.com/cilium/ebpf/link/uprobe_multi.go +++ b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go @@ -8,8 +8,7 @@ import ( "os" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/features" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -108,7 +107,7 @@ func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *Up } if err != nil { - if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil { + if haveFeatErr := features.HaveBPFLinkUprobeMulti(); haveFeatErr != nil { return nil, haveFeatErr } return nil, err @@ -176,45 +175,54 @@ func (kml *uprobeMultiLink) Update(_ *ebpf.Program) error { return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported) } -var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Name: "probe_upm_link", - Type: ebpf.Kprobe, - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - AttachType: ebpf.AttachTraceUprobeMulti, - 
License: "MIT", - }) - if errors.Is(err, unix.E2BIG) { - // Kernel doesn't support AttachType field. - return internal.ErrNotSupported - } - if err != nil { - return err - } - defer prog.Close() - - // We try to create uprobe multi link on '/' path which results in - // error with -EBADF in case uprobe multi link is supported. - fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{ - ProgFd: uint32(prog.FD()), - AttachType: sys.BPF_TRACE_UPROBE_MULTI, - Path: sys.NewStringPointer("/"), - Offsets: sys.SlicePointer([]uint64{0}), - Count: 1, - }) - switch { - case errors.Is(err, unix.EBADF): - return nil - case errors.Is(err, unix.EINVAL): - return internal.ErrNotSupported - case err != nil: - return err - } - - // should not happen - fd.Close() - return errors.New("successfully attached uprobe_multi to /, kernel bug?") -}, "6.6") +func (kml *uprobeMultiLink) Info() (*Info, error) { + var info sys.UprobeMultiLinkInfo + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("uprobe multi link info: %s", err) + } + var ( + path = make([]byte, info.PathSize) + refCtrOffsets = make([]uint64, info.Count) + addrs = make([]uint64, info.Count) + cookies = make([]uint64, info.Count) + ) + info = sys.UprobeMultiLinkInfo{ + Path: sys.SlicePointer(path), + PathSize: uint32(len(path)), + Offsets: sys.SlicePointer(addrs), + RefCtrOffsets: sys.SlicePointer(refCtrOffsets), + Cookies: sys.SlicePointer(cookies), + Count: uint32(len(addrs)), + } + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("uprobe multi link info: %s", err) + } + if info.Path.IsNil() { + path = nil + } + if info.Cookies.IsNil() { + cookies = nil + } + if info.Offsets.IsNil() { + addrs = nil + } + if info.RefCtrOffsets.IsNil() { + refCtrOffsets = nil + } + extra := &UprobeMultiInfo{ + Count: info.Count, + Flags: info.Flags, + pid: info.Pid, + offsets: addrs, + cookies: cookies, + refCtrOffsets: refCtrOffsets, + File: unix.ByteSliceToString(path), + } + 
+ return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go index 98c4a0d0b..d23963ce9 100644 --- a/vendor/github.com/cilium/ebpf/linker.go +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -54,47 +54,6 @@ func (hs *handles) Close() error { return errors.Join(errs...) } -// splitSymbols splits insns into subsections delimited by Symbol Instructions. -// insns cannot be empty and must start with a Symbol Instruction. -// -// The resulting map is indexed by Symbol name. -func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { - if len(insns) == 0 { - return nil, errors.New("insns is empty") - } - - currentSym := insns[0].Symbol() - if currentSym == "" { - return nil, errors.New("insns must start with a Symbol") - } - - start := 0 - progs := make(map[string]asm.Instructions) - for i, ins := range insns[1:] { - i := i + 1 - - sym := ins.Symbol() - if sym == "" { - continue - } - - // New symbol, flush the old one out. - progs[currentSym] = slices.Clone(insns[start:i]) - - if progs[sym] != nil { - return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) - } - currentSym = sym - start = i - } - - if tail := insns[start:]; len(tail) > 0 { - progs[currentSym] = slices.Clone(tail) - } - - return progs, nil -} - // The linker is responsible for resolving bpf-to-bpf calls between programs // within an ELF. 
Each BPF program must be a self-contained binary blob, // so when an instruction in one ELF program section wants to jump to diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go index f9499272b..78fc99575 100644 --- a/vendor/github.com/cilium/ebpf/map.go +++ b/vendor/github.com/cilium/ebpf/map.go @@ -29,7 +29,6 @@ var ( ErrKeyExist = errors.New("key already exists") ErrIterationAborted = errors.New("iteration aborted") ErrMapIncompatible = errors.New("map spec is incompatible with existing map") - errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") // pre-allocating these errors here since they may get called in hot code paths // and cause unnecessary memory allocations @@ -187,28 +186,71 @@ func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { return spec, nil } -// dataSection returns the contents and BTF Datasec descriptor of the spec. -func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { - if ms.Value == nil { - return nil, nil, errMapNoBTFValue +// dataSection returns the contents of a datasec if the MapSpec represents one. +func (ms *MapSpec) dataSection() ([]byte, error) { + if n := len(ms.Contents); n != 1 { + return nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + if key, ok := ms.Contents[0].Key.(uint32); !ok || key != 0 { + return nil, fmt.Errorf("expected contents to have key 0") } - ds, ok := ms.Value.(*btf.Datasec) + value, ok := kv.Value.([]byte) if !ok { - return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + return nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) } - if n := len(ms.Contents); n != 1 { - return nil, nil, fmt.Errorf("expected one key, found %d", n) + return value, nil +} + +// updateDataSection copies the values of variables into MapSpec.Contents[0].Value. +// +// Only variables declared in sectionName will be updated. 
+func (ms *MapSpec) updateDataSection(vars map[string]*VariableSpec, sectionName string) error { + var specs []*VariableSpec + for _, vs := range vars { + if vs.SectionName != sectionName { + continue + } + + specs = append(specs, vs) } - kv := ms.Contents[0] - value, ok := kv.Value.([]byte) - if !ok { - return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + if len(specs) == 0 { + return nil + } + + data, err := ms.dataSection() + if err != nil { + return err } - return value, ds, nil + // Do not modify the original data slice, ms.Contents is a shallow copy. + data = slices.Clone(data) + + slices.SortFunc(specs, func(a, b *VariableSpec) int { + return int(int64(a.Offset) - int64(b.Offset)) + }) + + offset := uint32(0) + for _, v := range specs { + if v.Offset < offset { + return fmt.Errorf("variable %s (offset %d) overlaps with previous variable (offset %d)", v.Name, v.Offset, offset) + } + + end := v.Offset + v.Size() + if int(end) > len(data) { + return fmt.Errorf("variable %s exceeds map size", v.Name) + } + + copy(data[v.Offset:end], v.Value) + offset = end + } + + ms.Contents = []MapKV{{Key: uint32(0), Value: data}} + return nil } func (ms *MapSpec) readOnly() bool { diff --git a/vendor/github.com/cilium/ebpf/memory.go b/vendor/github.com/cilium/ebpf/memory.go index e470bf24f..a31fae346 100644 --- a/vendor/github.com/cilium/ebpf/memory.go +++ b/vendor/github.com/cilium/ebpf/memory.go @@ -85,8 +85,8 @@ func (mm *Memory) close() { } // Size returns the size of the memory-mapped region in bytes. -func (mm *Memory) Size() int { - return len(mm.b) +func (mm *Memory) Size() uint32 { + return uint32(len(mm.b)) } // ReadOnly returns true if the memory-mapped region is read-only. @@ -95,11 +95,11 @@ func (mm *Memory) ReadOnly() bool { } // bounds returns true if an access at off of the given size is within bounds. 
-func (mm *Memory) bounds(off uint64, size uint64) bool { +func (mm *Memory) bounds(off, size uint32) bool { if off+size < off { return false } - return off+size <= uint64(len(mm.b)) + return off+size <= uint32(len(mm.b)) } // ReadAt implements [io.ReaderAt]. Useful for creating a new [io.OffsetWriter]. diff --git a/vendor/github.com/cilium/ebpf/memory_unsafe.go b/vendor/github.com/cilium/ebpf/memory_unsafe.go index 9518ff35d..07463e822 100644 --- a/vendor/github.com/cilium/ebpf/memory_unsafe.go +++ b/vendor/github.com/cilium/ebpf/memory_unsafe.go @@ -220,7 +220,7 @@ func unmap(size int) func(*byte) { // The comparable constraint narrows down the set of eligible types to exclude // slices, maps and functions. These complex types cannot be mapped to memory // directly. -func checkUnsafeMemory[T comparable](mm *Memory, off uint64) error { +func checkUnsafeMemory[T comparable](mm *Memory, off uint32) error { if mm.b == nil { return fmt.Errorf("memory-mapped region is nil") } @@ -241,11 +241,11 @@ func checkUnsafeMemory[T comparable](mm *Memory, off uint64) error { return fmt.Errorf("zero-sized type %s: %w", t, ErrInvalidType) } - if off%uint64(t.Align()) != 0 { + if off%uint32(t.Align()) != 0 { return fmt.Errorf("unaligned access of memory-mapped region: %d-byte aligned read at offset %d", t.Align(), off) } - vs, bs := uint64(size), uint64(len(mm.b)) + vs, bs := uint32(size), uint32(len(mm.b)) if off+vs > bs { return fmt.Errorf("%d-byte value at offset %d exceeds mmap size of %d bytes", vs, off, bs) } @@ -335,7 +335,7 @@ func checkType(name string, t reflect.Type) error { // must be within bounds of the Memory. // // To access read-only memory, use [Memory.ReadAt]. 
-func memoryPointer[T comparable](mm *Memory, off uint64) (*T, error) { +func memoryPointer[T comparable](mm *Memory, off uint32) (*T, error) { if err := checkUnsafeMemory[T](mm, off); err != nil { return nil, fmt.Errorf("memory pointer: %w", err) } diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go index 3e724234d..0394b02f0 100644 --- a/vendor/github.com/cilium/ebpf/prog.go +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -25,6 +25,10 @@ import ( // ErrNotSupported is returned whenever the kernel doesn't support a feature. var ErrNotSupported = internal.ErrNotSupported +// ErrProgIncompatible is returned when a loaded Program is incompatible with a +// given spec. +var ErrProgIncompatible = errors.New("program is incompatible") + // errBadRelocation is returned when the verifier rejects a program due to a // bad CO-RE relocation. // @@ -172,10 +176,34 @@ func (ps *ProgramSpec) Copy() *ProgramSpec { // Tag calculates the kernel tag for a series of instructions. // // Use asm.Instructions.Tag if you need to calculate for non-native endianness. +// +// Deprecated: The value produced by this method no longer matches tags produced +// by the kernel since Linux 6.18. Use [ProgramSpec.Compatible] instead. func (ps *ProgramSpec) Tag() (string, error) { return ps.Instructions.Tag(internal.NativeEndian) } +// Compatible returns nil if a loaded Program's kernel tag matches the one of +// the ProgramSpec. +// +// Returns [ErrProgIncompatible] if the tags do not match. 
+func (ps *ProgramSpec) Compatible(info *ProgramInfo) error { + if platform.IsWindows { + return fmt.Errorf("%w: Windows does not support tag readback from kernel", internal.ErrNotSupportedOnOS) + } + + ok, err := ps.Instructions.HasTag(info.Tag, internal.NativeEndian) + if err != nil { + return err + } + + if !ok { + return fmt.Errorf("%w: ProgramSpec and Program tags do not match", ErrProgIncompatible) + } + + return nil +} + // targetsKernelModule returns true if the program supports being attached to a // symbol provided by a kernel module. func (ps *ProgramSpec) targetsKernelModule() bool { @@ -745,6 +773,11 @@ type RunOptions struct { // CPU to run Program on. Optional field. // Note not all program types support this field. CPU uint32 + // BatchSize (default 64) affects the kernel's packet buffer allocation behaviour when running + // programs with BPF_F_TEST_XDP_LIVE_FRAMES and a non-zero [RunOptions.Repeat] value. + // For more details, see the kernel documentation on BPF_PROG_RUN: + // https://docs.kernel.org/bpf/bpf_prog_run.html#running-xdp-programs-in-live-frame-mode + BatchSize uint32 // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer // or similar. Typically used during benchmarking. Optional field. // @@ -911,6 +944,7 @@ func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { CtxOut: sys.SlicePointer(ctxOut), Flags: opts.Flags, Cpu: opts.CPU, + BatchSize: opts.BatchSize, } if p.Type() == Syscall && ctxIn != nil && ctxOut != nil { @@ -1152,6 +1186,12 @@ func findTargetInKernel(typeName string, target *btf.Type, cache *btf.Cache) (*b if errors.Is(err, btf.ErrNotFound) { spec, module, err := findTargetInModule(typeName, target, cache) if err != nil { + // EPERM may be returned when we do not have CAP_SYS_ADMIN. + // Wrap error with btf.ErrNotFound so callers can handle it accordingly. 
+ if errors.Is(err, unix.EPERM) { + return spec, nil, fmt.Errorf("find target in modules: %w (%w)", btf.ErrNotFound, err) + } + return nil, nil, fmt.Errorf("find target in modules: %w", err) } return spec, module, nil diff --git a/vendor/github.com/cilium/ebpf/ringbuf/reader.go b/vendor/github.com/cilium/ebpf/ringbuf/reader.go index fb9b1af22..d1483d78a 100644 --- a/vendor/github.com/cilium/ebpf/ringbuf/reader.go +++ b/vendor/github.com/cilium/ebpf/ringbuf/reader.go @@ -19,6 +19,21 @@ var ( errBusy = errors.New("sample not committed yet") ) +// poller abstracts platform-specific event notification. +type poller interface { + Wait(deadline time.Time) error + Flush() error + Close() error +} + +// eventRing abstracts platform-specific ring buffer memory access. +type eventRing interface { + size() int + AvailableBytes() uint64 + readRecord(rec *Record) error + Close() error +} + // ringbufHeader from 'struct bpf_ringbuf_hdr' in kernel/bpf/ringbuf.c type ringbufHeader struct { Len uint32 @@ -49,11 +64,11 @@ type Record struct { // Reader allows reading bpf_ringbuf_output // from user space. type Reader struct { - poller *poller + poller poller // mu protects read/write access to the Reader structure mu sync.Mutex - ring *ringbufEventRing + ring eventRing haveData bool deadline time.Time bufferSize int @@ -138,7 +153,8 @@ func (r *Reader) SetDeadline(t time.Time) { // See [ReadInto] for a more efficient version of this method. func (r *Reader) Read() (Record, error) { var rec Record - return rec, r.ReadInto(&rec) + err := r.ReadInto(&rec) + return rec, err } // ReadInto is like Read except that it allows reusing Record and associated buffers. 
diff --git a/vendor/github.com/cilium/ebpf/ringbuf/reader_other.go b/vendor/github.com/cilium/ebpf/ringbuf/reader_other.go index d5617e155..c03632ac4 100644 --- a/vendor/github.com/cilium/ebpf/ringbuf/reader_other.go +++ b/vendor/github.com/cilium/ebpf/ringbuf/reader_other.go @@ -11,12 +11,14 @@ import ( var ErrFlushed = epoll.ErrFlushed -type poller struct { +var _ poller = (*epollPoller)(nil) + +type epollPoller struct { *epoll.Poller events []unix.EpollEvent } -func newPoller(fd int) (*poller, error) { +func newPoller(fd int) (*epollPoller, error) { ep, err := epoll.New() if err != nil { return nil, err @@ -27,15 +29,16 @@ func newPoller(fd int) (*poller, error) { return nil, err } - return &poller{ + return &epollPoller{ Poller: ep, events: make([]unix.EpollEvent, 1), }, nil } +// Wait blocks until data is available or the deadline is reached. // Returns [os.ErrDeadlineExceeded] if a deadline was set and no wakeup was received. // Returns [ErrFlushed] if the ring buffer was flushed manually. 
-func (p *poller) Wait(deadline time.Time) error { +func (p *epollPoller) Wait(deadline time.Time) error { _, err := p.Poller.Wait(p.events, deadline) return err } diff --git a/vendor/github.com/cilium/ebpf/ringbuf/reader_windows.go b/vendor/github.com/cilium/ebpf/ringbuf/reader_windows.go index f90bfe34e..05201e5c0 100644 --- a/vendor/github.com/cilium/ebpf/ringbuf/reader_windows.go +++ b/vendor/github.com/cilium/ebpf/ringbuf/reader_windows.go @@ -13,16 +13,18 @@ import ( "github.com/cilium/ebpf/internal/efw" ) -type poller struct { +var ErrFlushed = errors.New("ring buffer flushed") + +var _ poller = (*windowsPoller)(nil) + +type windowsPoller struct { closed atomic.Bool handle windows.Handle flushHandle windows.Handle handles []windows.Handle } -var ErrFlushed = errors.New("ring buffer flushed") - -func newPoller(fd int) (*poller, error) { +func newPoller(fd int) (*windowsPoller, error) { handle, err := windows.CreateEvent(nil, 0, 0, nil) if err != nil { return nil, err @@ -40,17 +42,18 @@ func newPoller(fd int) (*poller, error) { return nil, err } - return &poller{ + return &windowsPoller{ handle: handle, flushHandle: flushHandle, handles: []windows.Handle{handle, flushHandle}, }, nil } +// Wait blocks until data is available or the deadline is reached. // Returns [os.ErrDeadlineExceeded] if a deadline was set and no wakeup was received. // Returns [ErrFlushed] if the ring buffer was flushed manually. // Returns [os.ErrClosed] if the poller was closed. -func (p *poller) Wait(deadline time.Time) error { +func (p *windowsPoller) Wait(deadline time.Time) error { if p.closed.Load() { return os.ErrClosed } @@ -82,7 +85,7 @@ func (p *poller) Wait(deadline time.Time) error { } // Flush interrupts [Wait] with [ErrFlushed]. 
-func (p *poller) Flush() error { +func (p *windowsPoller) Flush() error { // Signal the handle to wake up any waiting threads if err := windows.SetEvent(p.flushHandle); err != nil { if errors.Is(err, windows.ERROR_INVALID_HANDLE) { @@ -94,7 +97,7 @@ func (p *poller) Flush() error { return nil } -func (p *poller) Close() error { +func (p *windowsPoller) Close() error { p.closed.Store(true) if err := p.Flush(); err != nil { diff --git a/vendor/github.com/cilium/ebpf/ringbuf/ring_other.go b/vendor/github.com/cilium/ebpf/ringbuf/ring_other.go index c793499d9..339ef736a 100644 --- a/vendor/github.com/cilium/ebpf/ringbuf/ring_other.go +++ b/vendor/github.com/cilium/ebpf/ringbuf/ring_other.go @@ -12,14 +12,16 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -type ringbufEventRing struct { +var _ eventRing = (*mmapEventRing)(nil) + +type mmapEventRing struct { prod []byte cons []byte *ringReader cleanup runtime.Cleanup } -func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) { +func newRingBufEventRing(mapFD, size int) (*mmapEventRing, error) { cons, err := unix.Mmap(mapFD, 0, os.Getpagesize(), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED) if err != nil { return nil, fmt.Errorf("mmap consumer page: %w", err) @@ -34,7 +36,7 @@ func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) { cons_pos := (*uintptr)(unsafe.Pointer(&cons[0])) prod_pos := (*uintptr)(unsafe.Pointer(&prod[0])) - ring := &ringbufEventRing{ + ring := &mmapEventRing{ prod: prod, cons: cons, ringReader: newRingReader(cons_pos, prod_pos, prod[os.Getpagesize():]), @@ -47,7 +49,7 @@ func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) { return ring, nil } -func (ring *ringbufEventRing) Close() error { +func (ring *mmapEventRing) Close() error { ring.cleanup.Stop() prod, cons := ring.prod, ring.cons diff --git a/vendor/github.com/cilium/ebpf/ringbuf/ring_windows.go b/vendor/github.com/cilium/ebpf/ringbuf/ring_windows.go index 96c114d6a..b89be3313 100644 --- 
a/vendor/github.com/cilium/ebpf/ringbuf/ring_windows.go +++ b/vendor/github.com/cilium/ebpf/ringbuf/ring_windows.go @@ -10,7 +10,9 @@ import ( "github.com/cilium/ebpf/internal/sys" ) -type ringbufEventRing struct { +var _ eventRing = (*windowsEventRing)(nil) + +type windowsEventRing struct { mapFd *sys.FD cons, prod, data *uint8 *ringReader @@ -18,7 +20,7 @@ type ringbufEventRing struct { cleanup runtime.Cleanup } -func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) { +func newRingBufEventRing(mapFD, size int) (*windowsEventRing, error) { dupFd, err := efw.EbpfDuplicateFd(mapFD) if err != nil { return nil, fmt.Errorf("duplicate map fd: %w", err) @@ -46,7 +48,7 @@ func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) { prodPos := (*uintptr)(unsafe.Pointer(prodPtr)) data := unsafe.Slice(dataPtr, dataLen*2) - ring := &ringbufEventRing{ + ring := &windowsEventRing{ mapFd: fd, cons: consPtr, prod: prodPtr, @@ -60,7 +62,7 @@ func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) { return ring, nil } -func (ring *ringbufEventRing) Close() error { +func (ring *windowsEventRing) Close() error { ring.cleanup.Stop() return errors.Join( diff --git a/vendor/github.com/cilium/ebpf/staticcheck.conf b/vendor/github.com/cilium/ebpf/staticcheck.conf deleted file mode 100644 index cfc907da3..000000000 --- a/vendor/github.com/cilium/ebpf/staticcheck.conf +++ /dev/null @@ -1,3 +0,0 @@ -# Default configuration from https://staticcheck.dev/docs/configuration with -# SA4003 disabled. Remove when https://github.com/cilium/ebpf/issues/1876 is fixed. 
-checks = ["all", "-SA9003", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023", "-SA4003"] diff --git a/vendor/github.com/cilium/ebpf/struct_ops.go b/vendor/github.com/cilium/ebpf/struct_ops.go index 162f344ea..3b70d56d2 100644 --- a/vendor/github.com/cilium/ebpf/struct_ops.go +++ b/vendor/github.com/cilium/ebpf/struct_ops.go @@ -1,6 +1,7 @@ package ebpf import ( + "errors" "fmt" "reflect" "strings" @@ -10,6 +11,9 @@ import ( ) const structOpsValuePrefix = "bpf_struct_ops_" +const structOpsLinkSec = ".struct_ops.link" +const structOpsSec = ".struct_ops" +const structOpsKeySize = 4 // structOpsFindInnerType returns the "inner" struct inside a value struct_ops type. // @@ -44,6 +48,9 @@ func structOpsFindTarget(userType *btf.Struct, cache *btf.Cache) (vType *btf.Str target := btf.Type((*btf.Struct)(nil)) spec, module, err := findTargetInKernel(vTypeName, &target, cache) + if errors.Is(err, btf.ErrNotFound) { + return nil, 0, nil, fmt.Errorf("%q doesn't exist in kernel: %w", vTypeName, ErrNotSupported) + } if err != nil { return nil, 0, nil, fmt.Errorf("lookup value type %q: %w", vTypeName, err) } @@ -137,3 +144,32 @@ func structOpsIsMemZeroed(data []byte) bool { } return true } + +// structOpsSetAttachTo sets p.AttachTo in the expected "struct_name:memberName" format +// based on the struct definition. +// +// this relies on the assumption that each member in the +// `.struct_ops` section has a relocation at its starting byte offset. 
+func structOpsSetAttachTo( + sec *elfSection, + baseOff uint32, + userSt *btf.Struct, + progs map[string]*ProgramSpec) error { + for _, m := range userSt.Members { + memberOff := m.Offset + sym, ok := sec.relocations[uint64(baseOff+memberOff.Bytes())] + if !ok { + continue + } + p, ok := progs[sym.Name] + if !ok || p == nil { + return fmt.Errorf("program %s not found", sym.Name) + } + + if p.Type != StructOps { + return fmt.Errorf("program %s is not StructOps", sym.Name) + } + p.AttachTo = userSt.Name + ":" + m.Name + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/variable.go b/vendor/github.com/cilium/ebpf/variable.go index c6fd55cba..003ef89ce 100644 --- a/vendor/github.com/cilium/ebpf/variable.go +++ b/vendor/github.com/cilium/ebpf/variable.go @@ -1,9 +1,11 @@ package ebpf import ( + "encoding/binary" "fmt" "io" "reflect" + "slices" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal/sysenc" @@ -15,56 +17,52 @@ import ( // All operations on a VariableSpec's underlying MapSpec are performed in the // host's native endianness. type VariableSpec struct { - name string - offset uint64 - size uint64 - - m *MapSpec - t *btf.Var + Name string + // Name of the section this variable was allocated in. + SectionName string + // Offset of the variable within the datasec. + Offset uint32 + // Byte representation of the variable's value. + Value []byte + // Type information of the variable. Optional. + Type *btf.Var } // Set sets the value of the VariableSpec to the provided input using the host's // native endianness. 
func (s *VariableSpec) Set(in any) error { - buf, err := sysenc.Marshal(in, int(s.size)) - if err != nil { - return fmt.Errorf("marshaling value %s: %w", s.name, err) + size := int(s.Size()) + if size == 0 { + bs := binary.Size(in) + if bs < 0 { + return fmt.Errorf("cannot determine binary size of value %v", in) + } + size = bs } - b, _, err := s.m.dataSection() - if err != nil { - return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + if s.Value == nil { + s.Value = make([]byte, size) } - if int(s.offset+s.size) > len(b) { - return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + buf, err := sysenc.Marshal(in, size) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", s.Name, err) } - // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice - // to avoid any changes affecting other copies of the MapSpec. - cpy := make([]byte, len(b)) - copy(cpy, b) - - buf.CopyTo(cpy[s.offset : s.offset+s.size]) - - s.m.Contents[0] = MapKV{Key: uint32(0), Value: cpy} - + buf.CopyTo(s.Value) return nil } // Get writes the value of the VariableSpec to the provided output using the // host's native endianness. +// +// Returns an error if the variable is not initialized or if the unmarshaling fails. func (s *VariableSpec) Get(out any) error { - b, _, err := s.m.dataSection() - if err != nil { - return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + if s.Value == nil { + return fmt.Errorf("variable is not initialized") } - if int(s.offset+s.size) > len(b) { - return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) - } - - if err := sysenc.Unmarshal(out, b[s.offset:s.offset+s.size]); err != nil { + if err := sysenc.Unmarshal(out, s.Value); err != nil { return fmt.Errorf("unmarshaling value: %w", err) } @@ -72,61 +70,42 @@ func (s *VariableSpec) Get(out any) error { } // Size returns the size of the variable in bytes. 
-func (s *VariableSpec) Size() uint64 { - return s.size -} +func (s *VariableSpec) Size() uint32 { + if s.Value != nil { + return uint32(len(s.Value)) + } -// MapName returns the name of the underlying MapSpec. -func (s *VariableSpec) MapName() string { - return s.m.Name -} + if s.Type != nil { + size, err := btf.Sizeof(s.Type.Type) + if err != nil { + return 0 + } + return uint32(size) + } -// Offset returns the offset of the variable in the underlying MapSpec. -func (s *VariableSpec) Offset() uint64 { - return s.offset + return 0 } -// Constant returns true if the VariableSpec represents a variable that is -// read-only from the perspective of the BPF program. +// Constant returns true if the variable is located in a data section intended +// for constant values. func (s *VariableSpec) Constant() bool { - return s.m.readOnly() -} - -// Type returns the [btf.Var] representing the variable in its data section. -// This is useful for inspecting the variable's decl tags and the type -// information of the inner type. -// -// Returns nil if the original ELF object did not contain BTF information. -func (s *VariableSpec) Type() *btf.Var { - return s.t + return isConstantDataSection(s.SectionName) } func (s *VariableSpec) String() string { - return fmt.Sprintf("%s (type=%v, map=%s, offset=%d, size=%d)", s.name, s.t, s.m.Name, s.offset, s.size) + return fmt.Sprintf("%s (type=%v, section=%s, offset=%d, size=%d)", s.Name, s.Type, s.SectionName, s.Offset, s.Size()) } -// copy returns a new VariableSpec with the same values as the original, -// but with a different underlying MapSpec. This is useful when copying a -// CollectionSpec. Returns nil if a MapSpec with the same name is not found. -func (s *VariableSpec) copy(cpy *CollectionSpec) *VariableSpec { - out := &VariableSpec{ - name: s.name, - offset: s.offset, - size: s.size, - } - if s.t != nil { - out.t = btf.Copy(s.t).(*btf.Var) - } +// Copy the VariableSpec. 
+func (s *VariableSpec) Copy() *VariableSpec { + cpy := *s + cpy.Value = slices.Clone(s.Value) - // Attempt to find a MapSpec with the same name in the copied CollectionSpec. - for _, m := range cpy.Maps { - if m.Name == s.m.Name { - out.m = m - return out - } + if s.Type != nil { + cpy.Type = btf.Copy(s.Type).(*btf.Var) } - return nil + return &cpy } // Variable is a convenience wrapper for modifying global variables of a @@ -137,16 +116,16 @@ func (s *VariableSpec) copy(cpy *CollectionSpec) *VariableSpec { // [ErrNotSupported]. type Variable struct { name string - offset uint64 - size uint64 + offset uint32 + size uint32 t *btf.Var mm *Memory } -func newVariable(name string, offset, size uint64, t *btf.Var, mm *Memory) (*Variable, error) { +func newVariable(name string, offset, size uint32, t *btf.Var, mm *Memory) (*Variable, error) { if mm != nil { - if int(offset+size) > mm.Size() { + if offset+size > mm.Size() { return nil, fmt.Errorf("offset %d(+%d) is out of bounds", offset, size) } } @@ -161,7 +140,7 @@ func newVariable(name string, offset, size uint64, t *btf.Var, mm *Memory) (*Var } // Size returns the size of the variable. -func (v *Variable) Size() uint64 { +func (v *Variable) Size() uint32 { return v.size } @@ -240,7 +219,6 @@ func checkVariable[T any](v *Variable) error { } t := reflect.TypeFor[T]() - size := uint64(t.Size()) if t.Kind() == reflect.Uintptr && v.size == 8 { // uintptr is 8 bytes on 64-bit and 4 on 32-bit. In BPF/BTF, pointers are // always 8 bytes. For the sake of portability, allow accessing 8-byte BPF @@ -248,8 +226,8 @@ func checkVariable[T any](v *Variable) error { // pointer should be zero anyway. 
return nil } - if v.size != size { - return fmt.Errorf("can't create %d-byte accessor to %d-byte variable: %w", size, v.size, ErrInvalidType) + if uintptr(v.size) != t.Size() { + return fmt.Errorf("can't create %d-byte accessor to %d-byte variable: %w", t.Size(), v.size, ErrInvalidType) } return nil diff --git a/vendor/github.com/distribution/distribution/v3/.golangci.yml b/vendor/github.com/distribution/distribution/v3/.golangci.yml index 0f4d6ce0e..c0f93e547 100644 --- a/vendor/github.com/distribution/distribution/v3/.golangci.yml +++ b/vendor/github.com/distribution/distribution/v3/.golangci.yml @@ -1,27 +1,32 @@ -linters: +version: "2" + +formatters: enable: - - staticcheck - - unconvert - gofmt - goimports - - revive - - ineffassign - - govet - - unused - - misspell + +linters: + enable: - bodyclose + - misspell - prealloc - - errcheck + - revive + - staticcheck - tparallel + - unconvert -linters-settings: - revive: - rules: - # TODO(thaJeztah): temporarily disabled the "unused-parameter" check. - # It produces many warnings, and some of those may need to be looked at. - - name: unused-parameter - disabled: true + disable: + - errcheck -issues: - exclude-dirs: - - vendor + settings: + revive: + rules: + # TODO(thaJeztah): temporarily disabled the "unused-parameter" check. + # It produces many warnings, and some of those may need to be looked at. 
+ - name: unused-parameter + disabled: true + staticcheck: + checks: + - all + - -QF1008 # Omit embedded fields from selector expression; https://staticcheck.dev/docs/checks/#QF1008 + - -ST1000 # Incorrect or missing package comment; https://staticcheck.dev/docs/checks/#ST1000 diff --git a/vendor/github.com/distribution/distribution/v3/ADOPTERS.md b/vendor/github.com/distribution/distribution/v3/ADOPTERS.md index 2fdce0192..2af06b5eb 100644 --- a/vendor/github.com/distribution/distribution/v3/ADOPTERS.md +++ b/vendor/github.com/distribution/distribution/v3/ADOPTERS.md @@ -9,3 +9,5 @@ Harbor, CNCF Graduated project https://goharbor.io/ VMware Harbor Registry https://docs.pivotal.io/partners/vmware-harbor/index.html DigitalOcean Container Registry https://www.digitalocean.com/products/container-registry/ + +Cloudfleet Container Registry https://cloudfleet.ai/docs/container-registry/overview/ diff --git a/vendor/github.com/distribution/distribution/v3/AUTHORS b/vendor/github.com/distribution/distribution/v3/AUTHORS index 360a28452..ae49a4a76 100644 --- a/vendor/github.com/distribution/distribution/v3/AUTHORS +++ b/vendor/github.com/distribution/distribution/v3/AUTHORS @@ -1,6 +1,7 @@ # This file lists all individuals having contributed content to the repository. # For how it is generated, see dockerfiles/authors.Dockerfile. 
+1seal a-palchikov Aaron Lehmann Aaron Schlesinger @@ -11,6 +12,7 @@ Adam Enger Adam Kaplan Adam Wolfe Gordon AdamKorcz +Adrian Callejas Adrian Mouat Adrian Plata Adrien Duermael @@ -25,6 +27,7 @@ Alex Laties Alexander Larsson Alexander Morozov Alexey Gladkov +Alexsandr <78373713+Alexsandr-Random@users.noreply.github.com> Alfonso Acosta allencloud Alvin Feng @@ -43,6 +46,7 @@ Andrews Medina Andrey Kostov Andrey Smirnov Andrii Soldatenko +Andy Castille Andy Goldstein andy-cooper andyzhangx @@ -58,9 +62,12 @@ Antonio Mercado Antonio Murdaca Antonio Ojea Anusha Ragunathan +Anže Luzar Arien Holthuizen Arko Dasgupta Arnaud Porterie +Artem Khoroshev +Artem Tkachuk Arthur Baars Arthur Gautier Asuka Suzuki @@ -92,8 +99,10 @@ Caleb Spare Carson A Cezar Sa Espinola Chad Faragher +ChandonPierre Chaos John Charles Smith +Chen Qi Cheng Zheng chlins Chris Aniszczyk @@ -105,6 +114,8 @@ Christy Perez Chuanying Du Chun-Hung Hsiao Clayton Coleman +closeobserve +Cloudfleet Technology Team <187966520+cloudfleet-tech@users.noreply.github.com> Collin Shoop Corey Quon Cory Snider @@ -119,6 +130,7 @@ Daisuke Fujita Damien Mathieu Dan Fredell Dan Walsh +Dane Wagner Daniel Helfand Daniel Huhn Daniel Menet @@ -128,6 +140,7 @@ Daniel, Dao Quang Minh Danila Fominykh Darren Shepherd Dave +Dave Pedu Dave Trombley Dave Tucker David Calavera @@ -162,6 +175,7 @@ duanhongyi ducksecops E. M. Bray Edgar Lee +efcking Elliot Pahl elsanli(李楠) Elton Stoneman @@ -211,6 +225,7 @@ Grachev Mikhail Grant Watters Greg Rebholz Guillaume J. Charmes +Guillaume pelletier Guillaume Rose guoguangwu Gábor Lipták @@ -219,6 +234,7 @@ hasheddan Hayley Swimelar Helen-xie Henri Gomez +HexMix <32300164+mnixry@users.noreply.github.com> Honglin Feng Hu Keping Hua Wang @@ -273,6 +289,8 @@ Jonas Hecht Jonathan Boulle Jonathan Lee Jonathan Rudenberg +Joonas Bergius +Joonas Bergius Jordan Liggitt Jose D. 
Gomez R Josh Chorlton @@ -299,6 +317,7 @@ Kevin Robatel Kira Kirat Singh krynju +kub3let <95883234+kub3let@users.noreply.github.com> Kyle Squizzato Kyle Squizzato L-Hudson <44844738+L-Hudson@users.noreply.github.com> @@ -324,6 +343,7 @@ lostsquirrel Louis Kottmann Luca Bruno Lucas França de Oliveira +Lucas Melchior Lucas Santos Luis Lobo Borobia Luke Carpenter @@ -385,11 +405,15 @@ Nghia Tran Nicolas De Loof Nikita Tarasov ning xie +ningmingxiao Nishant Totla +njucjc +nkaaf Noah Treuhaft Novak Ivanovski Nuutti Kotivuori Nycholas de Oliveira e Oliveira +Oded Porat Oilbeater Oleg Bulatov Oleg Gnusarev @@ -399,12 +423,14 @@ Olivier Olivier Gambier Olivier Jacques ollypom +Omar Trigui Omer Cohen Oscar Caballero Owen W. Taylor paigehargrave Parth Mehrotra Pascal Borreli +Pat Riehecky Patrick Devine Patrick Easters Paul Cacheux @@ -424,6 +450,8 @@ Qiang Huang Qiao Anran Radon Rosborough Rafael Fonseca +Raghav Mahajan +Raj Siva-Rajah Randy Barlow Raphaël Enrici Ricardo Maraschini @@ -445,6 +473,7 @@ Ryan Abrams Ryan Thomas sakeven Sam Alba +Sam Jia Samuel Karp sangluo Santiago Torres @@ -456,6 +485,7 @@ Sebastiaan van Stijn Sebastien Coavoux Serge Dubrouski Sevki Hasirci +Shan Desai Sharif Nassar Shawn Chen Shawn Falkner-Horine @@ -486,6 +516,7 @@ Steven Hanna Steven Kalt Steven Taylor stonezdj +Sumedh Vats sun jian Sungho Moon Sven Dowideit @@ -501,6 +532,7 @@ Ted Reed Terin Stock tgic Thomas Berger +Thomas Cuthbert Thomas Sjögren Thomas Way Tianon Gravi @@ -516,6 +548,7 @@ tomoya-kawaguchi Tonis Tiigi Tony Holdstock-Brown Tosone +tranthang2404 Trapier Marshall Trevor Pounds Trevor Wood @@ -548,8 +581,10 @@ Wei Meng weiyuan.yl Wen-Quan Li Wenkai Yin +whosehang william wei <1342247033@qq.com> xg.song +Xiaolei.Liang xiaoxiangxianzi xiekeyang Xueshan Feng @@ -560,6 +595,7 @@ yixi zhang Yong Tang Yong Wen Chua Yongxin Li +Youfu Zhang Yu Wang yuexiao-wang YuJie <390282283@qq.com> diff --git a/vendor/github.com/distribution/distribution/v3/BUILDING.md 
b/vendor/github.com/distribution/distribution/v3/BUILDING.md index 5867e2aab..9285c3fe4 100644 --- a/vendor/github.com/distribution/distribution/v3/BUILDING.md +++ b/vendor/github.com/distribution/distribution/v3/BUILDING.md @@ -9,7 +9,7 @@ This is useful if you intend to actively work on the registry. Most people should use prebuilt images, for example, the [Registry docker image](https://hub.docker.com/r/library/registry/) provided by Docker. -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:3`. The latest updates to `main` branch are automatically pushed to [distribution Docker Hub repository](https://hub.docker.com/r/distribution/distribution) and tagged with `edge` tag. diff --git a/vendor/github.com/distribution/distribution/v3/Dockerfile b/vendor/github.com/distribution/distribution/v3/Dockerfile index 8cbce906b..d3fa0c57f 100644 --- a/vendor/github.com/distribution/distribution/v3/Dockerfile +++ b/vendor/github.com/distribution/distribution/v3/Dockerfile @@ -1,8 +1,17 @@ # syntax=docker/dockerfile:1 -ARG GO_VERSION=1.23.7 -ARG ALPINE_VERSION=3.21 -ARG XX_VERSION=1.6.1 +# GO_VERSION sets the version of the golang base image to use. +# It must be a supported tag in the docker.io/library/golang image repository. +ARG GO_VERSION=1.25.8 + +# ALPINE_VERSION sets the version of the alpine base image to use, including for the golang image. +# It must be a supported tag in the docker.io/library/alpine image repository +# that's also available as alpine image variant for the Golang version used. +ARG ALPINE_VERSION=3.23 + +# XX_VERSION sets the version of the tonistiigi/xx utility to use. +# It must be a valid tag in the docker.io/tonistiigi/xx image repository. 
+ARG XX_VERSION=1.9.0 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base @@ -55,6 +64,7 @@ RUN apk add --no-cache ca-certificates COPY cmd/registry/config-dev.yml /etc/distribution/config.yml COPY --from=binary /registry /bin/registry VOLUME ["/var/lib/registry"] +ENV OTEL_TRACES_EXPORTER=none EXPOSE 5000 ENTRYPOINT ["registry"] CMD ["serve", "/etc/distribution/config.yml"] diff --git a/vendor/github.com/distribution/distribution/v3/Makefile b/vendor/github.com/distribution/distribution/v3/Makefile index 9647ed9a0..9bf6e85cd 100644 --- a/vendor/github.com/distribution/distribution/v3/Makefile +++ b/vendor/github.com/distribution/distribution/v3/Makefile @@ -166,7 +166,7 @@ test-azure-storage: start-azure-storage run-azure-tests stop-azure-storage ## ru .PHONY: start-azure-storage start-azure-storage: ## start local Azure storage (Azurite) - $(COMPOSE) -f tests/docker-compose-azure-blob-store.yaml up azurite azurite-init -d + $(COMPOSE) -f tests/docker-compose-azure-blob-store.yaml up azurite azurite-init -d --wait .PHONY: stop-azure-storage stop-azure-storage: ## stop local Azure storage (minio) diff --git a/vendor/github.com/distribution/distribution/v3/blobs.go b/vendor/github.com/distribution/distribution/v3/blobs.go index 3de30b592..80f6cebba 100644 --- a/vendor/github.com/distribution/distribution/v3/blobs.go +++ b/vendor/github.com/distribution/distribution/v3/blobs.go @@ -157,7 +157,7 @@ type BlobIngester interface { // BlobIngester receiving them. 
// TODO (brianbland): unify this with ManifestServiceOption in the future type BlobCreateOption interface { - Apply(interface{}) error + Apply(any) error } // CreateOptions is a collection of blob creation modifiers relevant to general diff --git a/vendor/github.com/distribution/distribution/v3/configuration/configuration.go b/vendor/github.com/distribution/distribution/v3/configuration/configuration.go index 36105b841..4d1e326ec 100644 --- a/vendor/github.com/distribution/distribution/v3/configuration/configuration.go +++ b/vendor/github.com/distribution/distribution/v3/configuration/configuration.go @@ -8,8 +8,14 @@ import ( "reflect" "strings" "time" +) + +const ( + // defaultMaxEntries is the default max number of entries returned by the catalog endpoint + defaultMaxEntries = 1000 - "github.com/redis/go-redis/v9" + // defaultMaxTags is the default max number of tags returned by the tags endpoint + defaultMaxTags = 1000 ) // Configuration is a versioned registry configuration, intended to be provided by a yaml file, and @@ -60,6 +66,10 @@ type Configuration struct { // options to control the maximum number of entries returned by the catalog endpoint. Catalog Catalog `yaml:"catalog,omitempty"` + // Tags provides configuration for the tags list (/v2//tags/list) endpoint. + // It allows specifying the maximum number of tags returned by the endpoint. + Tags Tags `yaml:"tags,omitempty"` + // Proxy defines the configuration options for using the registry as a pull-through cache. Proxy Proxy `yaml:"proxy,omitempty"` @@ -107,7 +117,7 @@ type Log struct { // Fields allows users to specify static string fields to include in // the logger context. - Fields map[string]interface{} `yaml:"fields,omitempty"` + Fields map[string]any `yaml:"fields,omitempty"` // Hooks allows users to configure the log hooks, to enabling the // sequent handling behavior, when defined levels of log message emit. 
@@ -265,6 +275,14 @@ type LetsEncrypt struct { DirectoryURL string `yaml:"directoryurl,omitempty"` } +// Tags provides configuration options for the "/v2//tags/list" endpoint. +type Tags struct { + // MaxTags limits the maximum number of tags returned by the tags endpoint. + // Requesting n tags to the tags endpoint will return at most MaxTags tags. + // Default to 1000 tags if not set. + MaxTags int `yaml:"maxtags,omitempty"` +} + // LogHook is composed of hook Level and Type. // After hooks configuration, it can execute the next handling automatically, // when defined levels of log message emitted. @@ -411,7 +429,7 @@ type v0_1Configuration Configuration // UnmarshalYAML implements the yaml.Unmarshaler interface // Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers -func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (version *Version) UnmarshalYAML(unmarshal func(any) error) error { var versionString string err := unmarshal(&versionString) if err != nil { @@ -441,7 +459,7 @@ type Loglevel string // UnmarshalYAML implements the yaml.Umarshaler interface // Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a // valid loglevel -func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(any) error) error { var loglevelString string err := unmarshal(&loglevelString) if err != nil { @@ -460,7 +478,7 @@ func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error } // Parameters defines a key-value parameters mapping -type Parameters map[string]interface{} +type Parameters map[string]any // Storage defines the configuration for registry object storage type Storage map[string]Parameters @@ -501,7 +519,7 @@ func (storage Storage) TagParameters() Parameters { } // setTagParameter changes the parameter at the provided key to the new value 
-func (storage Storage) setTagParameter(key string, value interface{}) { +func (storage Storage) setTagParameter(key string, value any) { if _, ok := storage["tag"]; !ok { storage["tag"] = make(Parameters) } @@ -514,13 +532,13 @@ func (storage Storage) Parameters() Parameters { } // setParameter changes the parameter at the provided key to the new value -func (storage Storage) setParameter(key string, value interface{}) { +func (storage Storage) setParameter(key string, value any) { storage[storage.Type()][key] = value } // UnmarshalYAML implements the yaml.Unmarshaler interface // Unmarshals a single item map into a Storage or a string into a Storage type with no parameters -func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (storage *Storage) UnmarshalYAML(unmarshal func(any) error) error { var storageMap map[string]Parameters err := unmarshal(&storageMap) if err == nil { @@ -562,7 +580,7 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements the yaml.Marshaler interface -func (storage Storage) MarshalYAML() (interface{}, error) { +func (storage Storage) MarshalYAML() (any, error) { if storage.Parameters() == nil { return storage.Type(), nil } @@ -587,13 +605,13 @@ func (auth Auth) Parameters() Parameters { } // setParameter changes the parameter at the provided key to the new value -func (auth Auth) setParameter(key string, value interface{}) { +func (auth Auth) setParameter(key string, value any) { auth[auth.Type()][key] = value } // UnmarshalYAML implements the yaml.Unmarshaler interface // Unmarshals a single item map into a Storage or a string into a Storage type with no parameters -func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (auth *Auth) UnmarshalYAML(unmarshal func(any) error) error { var m map[string]Parameters err := unmarshal(&m) if err == nil { @@ -623,7 +641,7 @@ func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) 
error) error { } // MarshalYAML implements the yaml.Marshaler interface -func (auth Auth) MarshalYAML() (interface{}, error) { +func (auth Auth) MarshalYAML() (any, error) { if auth.Parameters() == nil { return auth.Type(), nil } @@ -694,6 +712,12 @@ type Proxy struct { // if not set, defaults to 7 * 24 hours // If set to zero, will never expire cache TTL *time.Duration `yaml:"ttl,omitempty"` + + // CacheWriteTimeout is the maximum duration allowed for cache write operations + // to complete when pulling blobs from the remote registry. This timeout ensures + // that cache writes don't hang indefinitely if the storage backend is slow. + // If not set, defaults to 5 minutes. + CacheWriteTimeout *time.Duration `yaml:"cachewritetimeout,omitempty"` } // ExecConfig defines the configuration for executing a command as a credential helper. @@ -762,7 +786,7 @@ type Platforms string // UnmarshalYAML implements the yaml.Umarshaler interface // Unmarshals a string into a Platforms option, lowercasing the string and validating that it represents a // valid option -func (platforms *Platforms) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (platforms *Platforms) UnmarshalYAML(unmarshal func(any) error) error { var platformsString string err := unmarshal(&platformsString) if err != nil { @@ -796,8 +820,8 @@ func Parse(rd io.Reader) (*Configuration, error) { p := NewParser("registry", []VersionedParseInfo{ { Version: MajorMinorVersion(0, 1), - ParseAs: reflect.TypeOf(v0_1Configuration{}), - ConversionFunc: func(c interface{}) (interface{}, error) { + ParseAs: reflect.TypeFor[v0_1Configuration](), + ConversionFunc: func(c any) (any, error) { if v0_1, ok := c.(*v0_1Configuration); ok { if v0_1.Log.Level == Loglevel("") { if v0_1.Loglevel != Loglevel("") { @@ -811,7 +835,14 @@ func Parse(rd io.Reader) (*Configuration, error) { } if v0_1.Catalog.MaxEntries <= 0 { - v0_1.Catalog.MaxEntries = 1000 + v0_1.Catalog.MaxEntries = defaultMaxEntries + } + + if v0_1.Tags.MaxTags 
<= 0 { + if v0_1.Tags.MaxTags < 0 { + return nil, errors.New("maxtags limit must be a non-negative integer value") + } + v0_1.Tags.MaxTags = defaultMaxTags } if v0_1.Storage.Type() == "" { @@ -833,10 +864,105 @@ func Parse(rd io.Reader) (*Configuration, error) { return config, nil } -// RedisOptions represents the configuration options for Redis, which are -// provided by the redis package. This struct can be used to configure the -// connection to Redis in a universal (clustered or standalone) setup. -type RedisOptions = redis.UniversalOptions +// RedisOptions represents the configuration options for Redis. This struct can be used +// to configure the connection to Redis in a universal (clustered or standalone) setup. +type RedisOptions struct { + // Addrs is either a single address or a seed list of host:port addresses + // of cluster/sentinel nodes. + Addrs []string `yaml:"addrs,omitempty"` + + // ClientName will execute the `CLIENT SETNAME ClientName` command for each connection. + ClientName string `yaml:"clientname,omitempty"` + + // DB is the database to be selected after connecting to the server. + // Only applicable to single-node and failover clients. + DB int `yaml:"db,omitempty"` + + // Protocol specifies the Redis protocol version to use. + Protocol int `yaml:"protocol,omitempty"` + + // Username for authentication (used with ACLs). + Username string `yaml:"username,omitempty"` + + // Password for authentication. + Password string `yaml:"password,omitempty"` + + // SentinelUsername is the username for Sentinel authentication. + SentinelUsername string `yaml:"sentinelusername,omitempty"` + + // SentinelPassword is the password for Sentinel authentication. + SentinelPassword string `yaml:"sentinelpassword,omitempty"` + + // MaxRetries is the maximum number of retries before giving up. + MaxRetries int `yaml:"maxretries,omitempty"` + + // MinRetryBackoff is the minimum backoff between each retry. 
+ MinRetryBackoff time.Duration `yaml:"minretrybackoff,omitempty"` + + // MaxRetryBackoff is the maximum backoff between each retry. + MaxRetryBackoff time.Duration `yaml:"maxretrybackoff,omitempty"` + + // DialTimeout is the timeout for establishing new connections. + DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` + + // ReadTimeout is the timeout for reading a single command reply. + ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` + + // WriteTimeout is the timeout for writing a single command. + WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` + + // ContextTimeoutEnabled enables wrapping operations with a context timeout. + ContextTimeoutEnabled bool `yaml:"contexttimeoutenabled,omitempty"` + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default is LIFO). + PoolFIFO bool `yaml:"poolfifo,omitempty"` + + // PoolSize is the maximum number of socket connections. + PoolSize int `yaml:"poolsize,omitempty"` + + // PoolTimeout is the amount of time a client waits for a connection if all are busy. + PoolTimeout time.Duration `yaml:"pooltimeout,omitempty"` + + // MinIdleConns is the minimum number of idle connections maintained in the pool. + MinIdleConns int `yaml:"minidleconns,omitempty"` + + // MaxIdleConns is the maximum number of idle connections. + MaxIdleConns int `yaml:"maxidleconns,omitempty"` + + // MaxActiveConns is the maximum number of active connections (cluster mode only). + MaxActiveConns int `yaml:"maxactiveconns,omitempty"` + + // ConnMaxIdleTime is the maximum amount of time a connection can be idle. + ConnMaxIdleTime time.Duration `yaml:"connmaxidletime,omitempty"` + + // ConnMaxLifetime is the maximum lifetime of a connection. + ConnMaxLifetime time.Duration `yaml:"connmaxlifetime,omitempty"` + + // MaxRedirects is the maximum number of redirects to follow in cluster mode. + MaxRedirects int `yaml:"maxredirects,omitempty"` + + // ReadOnly enables read-only mode for cluster clients. 
+ ReadOnly bool `yaml:"readonly,omitempty"` + + // RouteByLatency routes commands to the closest node based on latency. + RouteByLatency bool `yaml:"routebylatency,omitempty"` + + // RouteRandomly routes commands randomly among eligible nodes. + RouteRandomly bool `yaml:"routerandomly,omitempty"` + + // MasterName is the Sentinel master name. + // Only applicable for failover clients. + MasterName string `yaml:"mastername,omitempty"` + + // DisableIdentity disables the CLIENT SETINFO command on connect. + DisableIdentity bool `yaml:"disableidentity,omitempty"` + + // IdentitySuffix is an optional suffix for CLIENT SETINFO. + IdentitySuffix string `yaml:"identitysuffix,omitempty"` + + // UnstableResp3 enables RESP3 features that are not finalized yet. + UnstableResp3 bool `yaml:"unstableresp3,omitempty"` +} // RedisTLSOptions configures the TLS (Transport Layer Security) settings for // Redis connections, allowing secure communication over the network. @@ -849,9 +975,9 @@ type RedisTLSOptions struct { // This key is used to authenticate the client during the TLS handshake. Key string `yaml:"key,omitempty"` - // ClientCAs specifies a list of certificates to be used to verify the server's - // certificate during the TLS handshake. This can be used for mutual TLS authentication. - ClientCAs []string `yaml:"clientcas,omitempty"` + // RootCAs specifies a list of root certificate authorities that clients use when + // verifying server certificates. If RootCAs is nil, TLS uses the host's root CA set. + RootCAs []string `yaml:"rootcas,omitempty"` } // Redis represents the configuration for connecting to a Redis server. 
It includes @@ -867,162 +993,6 @@ type Redis struct { TLS RedisTLSOptions `yaml:"tls,omitempty"` } -func (c Redis) MarshalYAML() (interface{}, error) { - fields := make(map[string]interface{}) - - val := reflect.ValueOf(c.Options) - typ := val.Type() - - for i := 0; i < val.NumField(); i++ { - field := typ.Field(i) - fieldValue := val.Field(i) - - // ignore funcs fields in redis.UniversalOptions - if fieldValue.Kind() == reflect.Func { - continue - } - - fields[strings.ToLower(field.Name)] = fieldValue.Interface() - } - - // Add TLS fields if they're not empty - if c.TLS.Certificate != "" || c.TLS.Key != "" || len(c.TLS.ClientCAs) > 0 { - fields["tls"] = c.TLS - } - - return fields, nil -} - -func (c *Redis) UnmarshalYAML(unmarshal func(interface{}) error) error { - var fields map[string]interface{} - err := unmarshal(&fields) - if err != nil { - return err - } - - val := reflect.ValueOf(&c.Options).Elem() - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - fieldName := strings.ToLower(field.Name) - - if value, ok := fields[fieldName]; ok { - fieldValue := val.Field(i) - if fieldValue.CanSet() { - switch field.Type { - case reflect.TypeOf(time.Duration(0)): - durationStr, ok := value.(string) - if !ok { - return fmt.Errorf("invalid duration value for field: %s", fieldName) - } - duration, err := time.ParseDuration(durationStr) - if err != nil { - return fmt.Errorf("failed to parse duration for field: %s, error: %v", fieldName, err) - } - fieldValue.Set(reflect.ValueOf(duration)) - default: - if err := setFieldValue(fieldValue, value); err != nil { - return fmt.Errorf("failed to set value for field: %s, error: %v", fieldName, err) - } - } - } - } - } - - // Handle TLS fields - if tlsData, ok := fields["tls"]; ok { - tlsMap, ok := tlsData.(map[interface{}]interface{}) - if !ok { - return fmt.Errorf("invalid TLS data structure") - } - - if cert, ok := tlsMap["certificate"]; ok { - var isString bool - c.TLS.Certificate, isString = 
cert.(string) - if !isString { - return fmt.Errorf("Redis TLS certificate must be a string") - } - } - if key, ok := tlsMap["key"]; ok { - var isString bool - c.TLS.Key, isString = key.(string) - if !isString { - return fmt.Errorf("Redis TLS (private) key must be a string") - } - } - if cas, ok := tlsMap["clientcas"]; ok { - caList, ok := cas.([]interface{}) - if !ok { - return fmt.Errorf("invalid clientcas data structure") - } - for _, ca := range caList { - if caStr, ok := ca.(string); ok { - c.TLS.ClientCAs = append(c.TLS.ClientCAs, caStr) - } - } - } - } - - return nil -} - -func setFieldValue(field reflect.Value, value interface{}) error { - if value == nil { - return nil - } - - switch field.Kind() { - case reflect.String: - stringValue, ok := value.(string) - if !ok { - return fmt.Errorf("failed to convert value to string") - } - field.SetString(stringValue) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - intValue, ok := value.(int) - if !ok { - return fmt.Errorf("failed to convert value to integer") - } - field.SetInt(int64(intValue)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - uintValue, ok := value.(uint) - if !ok { - return fmt.Errorf("failed to convert value to unsigned integer") - } - field.SetUint(uint64(uintValue)) - case reflect.Float32, reflect.Float64: - floatValue, ok := value.(float64) - if !ok { - return fmt.Errorf("failed to convert value to float") - } - field.SetFloat(floatValue) - case reflect.Bool: - boolValue, ok := value.(bool) - if !ok { - return fmt.Errorf("failed to convert value to boolean") - } - field.SetBool(boolValue) - case reflect.Slice: - slice := reflect.MakeSlice(field.Type(), 0, 0) - valueSlice, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("failed to convert value to slice") - } - for _, item := range valueSlice { - sliceValue := reflect.New(field.Type().Elem()).Elem() - if err := setFieldValue(sliceValue, item); err != nil { - return 
err - } - slice = reflect.Append(slice, sliceValue) - } - field.Set(slice) - default: - return fmt.Errorf("unsupported field type: %v", field.Type()) - } - return nil -} - const ( ClientAuthRequestClientCert = "request-client-cert" ClientAuthRequireAnyClientCert = "require-any-client-cert" @@ -1034,7 +1004,7 @@ type ClientAuth string // UnmarshalYAML implements the yaml.Umarshaler interface // Unmarshals a string into a ClientAuth, validating that it represents a valid ClientAuth mod -func (clientAuth *ClientAuth) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (clientAuth *ClientAuth) UnmarshalYAML(unmarshal func(any) error) error { var clientAuthString string err := unmarshal(&clientAuthString) if err != nil { diff --git a/vendor/github.com/distribution/distribution/v3/configuration/parser.go b/vendor/github.com/distribution/distribution/v3/configuration/parser.go index 1a9201986..0fd57d65b 100644 --- a/vendor/github.com/distribution/distribution/v3/configuration/parser.go +++ b/vendor/github.com/distribution/distribution/v3/configuration/parser.go @@ -57,7 +57,7 @@ type VersionedParseInfo struct { // ConversionFunc defines a method for converting the parsed configuration // (of type ParseAs) into the current configuration version // Note: this method signature is very unclear with the absence of generics - ConversionFunc func(interface{}) (interface{}, error) + ConversionFunc func(any) (any, error) } type envVar struct { @@ -111,7 +111,7 @@ func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { // than version, following the scheme below: // v.Abc may be replaced by the value of PREFIX_ABC, // v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth -func (p *Parser) Parse(in []byte, v interface{}) error { +func (p *Parser) Parse(in []byte, v any) error { var versionedStruct struct { Version Version } @@ -157,7 +157,7 @@ func (p *Parser) Parse(in []byte, v interface{}) error { func (p *Parser) overwriteFields(v 
reflect.Value, fullpath string, path []string, payload string) error { for v.Kind() == reflect.Ptr { if v.IsNil() { - panic("encountered nil pointer while handling environment variable " + fullpath) + return fmt.Errorf("encountered nil pointer while handling environment variable %s", fullpath) } v = reflect.Indirect(v) } @@ -169,11 +169,11 @@ func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string case reflect.Slice: idx, err := strconv.Atoi(path[0]) if err != nil { - panic("non-numeric index: " + path[0]) + return fmt.Errorf("non-numeric index: %s", path[0]) } if idx > v.Len() { - panic("undefined index: " + path[0]) + return fmt.Errorf("undefined index: %s", path[0]) } // if there is no element or the current slice length @@ -191,7 +191,7 @@ func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string return p.overwriteFields(v.Elem(), fullpath, path, payload) } // Interface was empty; create an implicit map - var template map[string]interface{} + var template map[string]any wrappedV := reflect.MakeMap(reflect.TypeOf(template)) v.Set(wrappedV) return p.overwriteMap(wrappedV, fullpath, path, payload) @@ -205,9 +205,24 @@ func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string byUpperCase := make(map[string]int) for i := 0; i < v.NumField(); i++ { sf := v.Type().Field(i) + + // For fields inlined in the YAML configuration file, the environment variables also need to be inlined + // Example struct tag for inlined field: `yaml:",inline"` + _, yamlOpts, _ := strings.Cut(sf.Tag.Get("yaml"), ",") + if yamlOpts == "inline" && sf.Type.Kind() == reflect.Struct { + // Inlined struct, check whether the env variable corresponds to a field inside this struct + // Maps could also be inlined, but since we don't need it right now it is not supported + inlined := v.Field(i) + for j := range inlined.NumField() { + if strings.EqualFold(inlined.Type().Field(j).Name, path[0]) { + return p.overwriteFields(inlined, 
fullpath, path, payload) + } + } + } + upper := strings.ToUpper(sf.Name) if _, present := byUpperCase[upper]; present { - panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) + return fmt.Errorf("field name collision in configuration object: %s", sf.Name) } byUpperCase[upper] = i } diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/auth/api_version.go b/vendor/github.com/distribution/distribution/v3/internal/client/auth/api_version.go index 7d8f1d957..21200223b 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/client/auth/api_version.go +++ b/vendor/github.com/distribution/distribution/v3/internal/client/auth/api_version.go @@ -29,7 +29,7 @@ func APIVersions(resp *http.Response, versionHeader string) []APIVersion { versions := []APIVersion{} if versionHeader != "" { for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { + for version := range strings.FieldsSeq(supportedVersions) { versions = append(versions, ParseAPIVersion(version)) } } diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/authchallenge.go b/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/authchallenge.go index 4d26e5688..6d479c38f 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/authchallenge.go @@ -35,7 +35,7 @@ func init() { // token = 1* // qdtext = > - for c := 0; c < 256; c++ { + for c := range 256 { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/filteringmanager.go b/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/filteringmanager.go new file mode 100644 index 
000000000..2e7f4af8b --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/internal/client/auth/challenge/filteringmanager.go @@ -0,0 +1,46 @@ +package challenge + +import ( + "net/http" + "net/url" +) + +// FilteringManager decorates another Manager and drops challenges that do not +// satisfy the configured predicate. +type FilteringManager struct { + base Manager + keep func(Challenge) bool +} + +// NewFilteringManager returns a Manager that delegates storage to base and +// filters challenges on reads. If keep is nil, the base manager is returned. +func NewFilteringManager(base Manager, keep func(Challenge) bool) Manager { + if keep == nil { + return base + } + + return FilteringManager{ + base: base, + keep: keep, + } +} + +func (m FilteringManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + challenges, err := m.base.GetChallenges(endpoint) + if err != nil { + return nil, err + } + + filtered := make([]Challenge, 0, len(challenges)) + for _, c := range challenges { + if m.keep(c) { + filtered = append(filtered, c) + } + } + + return filtered, nil +} + +func (m FilteringManager) AddResponse(resp *http.Response) error { + return m.base.AddResponse(resp) +} diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/auth/session.go b/vendor/github.com/distribution/distribution/v3/internal/client/auth/session.go index 104290ab4..4f33a649e 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/client/auth/session.go +++ b/vendor/github.com/distribution/distribution/v3/internal/client/auth/session.go @@ -7,6 +7,7 @@ import ( "fmt" "net/http" "net/url" + "slices" "strings" "sync" "time" @@ -178,10 +179,10 @@ func (rs RegistryScope) String() string { // Logger defines the injectable logging interface, used on TokenHandlers. 
type Logger interface { - Debugf(format string, args ...interface{}) + Debugf(format string, args ...any) } -func logDebugf(logger Logger, format string, args ...interface{}) { +func logDebugf(logger Logger, format string, args ...any) { if logger == nil { return } @@ -305,12 +306,7 @@ func (th *tokenHandler) getToken(ctx context.Context, params map[string]string, } func hasScope(scopes []string, scope string) bool { - for _, s := range scopes { - if s == scope { - return true - } - } - return false + return slices.Contains(scopes, scope) } type postTokenResponse struct { diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/blob_writer.go b/vendor/github.com/distribution/distribution/v3/internal/client/blob_writer.go index a4a9cdec5..ce4425582 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/client/blob_writer.go +++ b/vendor/github.com/distribution/distribution/v3/internal/client/blob_writer.go @@ -123,7 +123,7 @@ func (hbu *httpBlobUpload) StartedAt() time.Time { func (hbu *httpBlobUpload) Commit(ctx context.Context, desc v1.Descriptor) (v1.Descriptor, error) { // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPut, hbu.location, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, hbu.location, nil) if err != nil { return v1.Descriptor{}, err } @@ -146,7 +146,7 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc v1.Descriptor) (v1.D } func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequestWithContext(hbu.ctx, http.MethodDelete, hbu.location, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, hbu.location, nil) if err != nil { return err } diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/repository.go b/vendor/github.com/distribution/distribution/v3/internal/client/repository.go index 4f8659306..71c8c6b7a 100644 --- 
a/vendor/github.com/distribution/distribution/v3/internal/client/repository.go +++ b/vendor/github.com/distribution/distribution/v3/internal/client/repository.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "path" + "slices" "strconv" "strings" "time" @@ -45,14 +46,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error { // Don't add to redirected request if redirected // request already has a header with the same // name and value. - hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { + if !slices.Contains(req.Header[headerName], val) { req.Header.Add(headerName, val) } } @@ -350,7 +344,11 @@ func (t *tags) Lookup(ctx context.Context, digest v1.Descriptor) ([]string, erro panic("not implemented") } -func (t *tags) Tag(ctx context.Context, tag string, desc v1.Descriptor) error { +func (t *tags) List(ctx context.Context, limit int, last string) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { panic("not implemented") } @@ -399,6 +397,12 @@ func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, erro if err != nil { return false, err } + + mediaTypes := distribution.ManifestMediaTypes() + for _, t := range mediaTypes { + req.Header.Add("Accept", t) + } + resp, err := ms.client.Do(req) if err != nil { return false, err @@ -733,16 +737,16 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (v1.Descri }) } -type optionFunc func(interface{}) error +type optionFunc func(any) error -func (f optionFunc) Apply(v interface{}) error { +func (f optionFunc) Apply(v any) error { return f(v) } // WithMountFrom returns a BlobCreateOption which designates that the blob should be // mounted from the given canonical reference. 
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { + return optionFunc(func(v any) error { opts, ok := v.(*distribution.CreateOptions) if !ok { return fmt.Errorf("unexpected options type: %T", v) diff --git a/vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go index fe8379800..090d16820 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/context.go @@ -2,9 +2,10 @@ package dcontext import ( "context" + "maps" "sync" - "github.com/google/uuid" + "github.com/distribution/distribution/v3/internal/uuid" ) // instanceContext is a context that provides only an instance id. It is @@ -15,7 +16,7 @@ type instanceContext struct { once sync.Once // once protect generation of the id } -func (ic *instanceContext) Value(key interface{}) interface{} { +func (ic *instanceContext) Value(key any) any { if key == "instance.id" { ic.once.Do(func() { // We want to lazy initialize the UUID such that we don't @@ -45,16 +46,14 @@ func Background() context.Context { // key, falling back to a parent if not present. type stringMapContext struct { context.Context - m map[string]interface{} + m map[string]any } // WithValues returns a context that proxies lookups through a map. Only // supports string keys. -func WithValues(ctx context.Context, m map[string]interface{}) context.Context { - mo := make(map[string]interface{}, len(m)) // make our own copy. - for k, v := range m { - mo[k] = v - } +func WithValues(ctx context.Context, m map[string]any) context.Context { + mo := make(map[string]any, len(m)) // make our own copy. 
+ maps.Copy(mo, m) return stringMapContext{ Context: ctx, @@ -62,7 +61,7 @@ func WithValues(ctx context.Context, m map[string]interface{}) context.Context { } } -func (smc stringMapContext) Value(key interface{}) interface{} { +func (smc stringMapContext) Value(key any) any { if ks, ok := key.(string); ok { if v, ok := smc.m[ks]; ok { return v diff --git a/vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go index 84d5b4744..cd3e81fec 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/http.go @@ -9,7 +9,7 @@ import ( "time" "github.com/distribution/distribution/v3/internal/requestutil" - "github.com/google/uuid" + "github.com/distribution/distribution/v3/internal/uuid" "github.com/gorilla/mux" ) @@ -132,7 +132,7 @@ type httpRequestContext struct { // Value returns a keyed element of the request for use in the context. To get // the request itself, query "request". For other components, access them as // "request.". 
For example, r.RequestURI -func (ctx *httpRequestContext) Value(key interface{}) interface{} { +func (ctx *httpRequestContext) Value(key any) any { if keyStr, ok := key.(string); ok { switch keyStr { case "http.request": @@ -173,7 +173,7 @@ type muxVarsContext struct { vars map[string]string } -func (ctx *muxVarsContext) Value(key interface{}) interface{} { +func (ctx *muxVarsContext) Value(key any) any { if keyStr, ok := key.(string); ok { if keyStr == "vars" { return ctx.vars @@ -230,7 +230,7 @@ func (irw *instrumentedResponseWriter) Flush() { } } -func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { +func (irw *instrumentedResponseWriter) Value(key any) any { if keyStr, ok := key.(string); ok { switch keyStr { case "http.response": diff --git a/vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go index 058fc8310..d85928d19 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/logger.go @@ -17,34 +17,34 @@ var ( // Logger provides a leveled-logging interface. 
type Logger interface { // standard logger methods - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) - Panic(args ...interface{}) - Panicf(format string, args ...interface{}) - Panicln(args ...interface{}) + Panic(args ...any) + Panicf(format string, args ...any) + Panicln(args ...any) // Leveled methods, from logrus - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) - Debugln(args ...interface{}) + Debug(args ...any) + Debugf(format string, args ...any) + Debugln(args ...any) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Errorln(args ...interface{}) + Error(args ...any) + Errorf(format string, args ...any) + Errorln(args ...any) - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Infoln(args ...interface{}) + Info(args ...any) + Infof(format string, args ...any) + Infoln(args ...any) - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Warnln(args ...interface{}) + Warn(args ...any) + Warnf(format string, args ...any) + Warnln(args ...any) WithError(err error) *logrus.Entry } @@ -59,14 +59,14 @@ func WithLogger(ctx context.Context, logger Logger) context.Context { // GetLoggerWithField returns a logger instance with the specified field key // and value without affecting the context. Extra specified keys will be // resolved from the context. 
-func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger { +func GetLoggerWithField(ctx context.Context, key, value any, keys ...any) Logger { return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) } // GetLoggerWithFields returns a logger instance with the specified fields // without affecting the context. Extra specified keys will be resolved from // the context. -func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { +func GetLoggerWithFields(ctx context.Context, fields map[any]any, keys ...any) Logger { // must convert from interface{} -> interface{} to string -> interface{} for logrus. lfields := make(logrus.Fields, len(fields)) for key, value := range fields { @@ -82,7 +82,7 @@ func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{} // argument passed to GetLogger will be passed to fmt.Sprint when expanded as // a logging key field. If context keys are integer constants, for example, // its recommended that a String method is implemented. -func GetLogger(ctx context.Context, keys ...interface{}) Logger { +func GetLogger(ctx context.Context, keys ...any) Logger { return getLogrusLogger(ctx, keys...) } @@ -102,7 +102,7 @@ func SetDefaultLogger(logger Logger) { // are provided, they will be resolved on the context and included in the // logger. Only use this function if specific logrus functionality is // required. -func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry { +func getLogrusLogger(ctx context.Context, keys ...any) *logrus.Entry { var logger *logrus.Entry // Get a logger, if it is present. 
diff --git a/vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go index ba2481053..de90c2744 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/trace.go @@ -5,7 +5,7 @@ import ( "runtime" "time" - "github.com/google/uuid" + "github.com/distribution/distribution/v3/internal/uuid" ) // WithTrace allocates a traced timing span in a new context. This allows a @@ -37,7 +37,7 @@ import ( // // Notice that the function name is automatically resolved, along with the // package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...any)) { if ctx == nil { ctx = Background() } @@ -54,7 +54,7 @@ func WithTrace(ctx context.Context) (context.Context, func(format string, a ...i line: line, } - return ctx, func(format string, a ...interface{}) { + return ctx, func(format string, a ...any) { GetLogger(ctx, "trace.duration", "trace.id", @@ -79,7 +79,7 @@ type traced struct { line int } -func (ts *traced) Value(key interface{}) interface{} { +func (ts *traced) Value(key any) any { switch key { case "trace.start": return ts.start diff --git a/vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go b/vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go index 5b32ba16f..ef8557cb6 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go +++ b/vendor/github.com/distribution/distribution/v3/internal/dcontext/util.go @@ -8,7 +8,7 @@ import ( // Since looks up key, which should be a time.Time, and returns the duration // since that time. If the key is not found, the value returned will be zero. 
// This is helpful when inferring metrics related to context execution times. -func Since(ctx context.Context, key interface{}) time.Duration { +func Since(ctx context.Context, key any) time.Duration { if startedAt, ok := ctx.Value(key).(time.Time); ok { return time.Since(startedAt) } @@ -17,7 +17,7 @@ func Since(ctx context.Context, key interface{}) time.Duration { // GetStringValue returns a string value from the context. The empty string // will be returned if not found. -func GetStringValue(ctx context.Context, key interface{}) (value string) { +func GetStringValue(ctx context.Context, key any) (value string) { if valuev, ok := ctx.Value(key).(string); ok { value = valuev } diff --git a/vendor/github.com/distribution/distribution/v3/internal/uuid/uuid.go b/vendor/github.com/distribution/distribution/v3/internal/uuid/uuid.go new file mode 100644 index 000000000..b0ed73b62 --- /dev/null +++ b/vendor/github.com/distribution/distribution/v3/internal/uuid/uuid.go @@ -0,0 +1,11 @@ +package uuid + +import ( + "github.com/google/uuid" +) + +// NewString returns a new V7 UUID string. V7 UUIDs are time-ordered for better database performance. +// Panics on error to maintain compatibility with google/uuid's NewString() method. 
+func NewString() string { + return uuid.Must(uuid.NewV7()).String() +} diff --git a/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go b/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go index 4752331ab..ecf966480 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/manifestlist/manifestlist.go @@ -196,8 +196,8 @@ func (m DeserializedManifestList) Payload() (string, []byte, error) { // contains fields that belong to a manifest func validateManifestList(b []byte) error { var doc struct { - Config interface{} `json:"config,omitempty"` - Layers interface{} `json:"layers,omitempty"` + Config any `json:"config,omitempty"` + Layers any `json:"layers,omitempty"` } if err := json.Unmarshal(b, &doc); err != nil { return err diff --git a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go index add766f3e..df2da46c3 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go +++ b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/index.go @@ -151,8 +151,8 @@ func (m DeserializedImageIndex) Payload() (string, []byte, error) { // contains fields that belong to a manifest func validateIndex(b []byte) error { var doc struct { - Config interface{} `json:"config,omitempty"` - Layers interface{} `json:"layers,omitempty"` + Config any `json:"config,omitempty"` + Layers any `json:"layers,omitempty"` } if err := json.Unmarshal(b, &doc); err != nil { return err diff --git a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go index 2009e2f5a..05c901f18 100644 --- a/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go +++ 
b/vendor/github.com/distribution/distribution/v3/manifest/ocischema/manifest.go @@ -141,7 +141,7 @@ func (m *DeserializedManifest) Payload() (string, []byte, error) { // contains fields that belong to a index func validateManifest(b []byte) error { var doc struct { - Manifests interface{} `json:"manifests,omitempty"` + Manifests any `json:"manifests,omitempty"` } if err := json.Unmarshal(b, &doc); err != nil { return err diff --git a/vendor/github.com/distribution/distribution/v3/notifications/bridge.go b/vendor/github.com/distribution/distribution/v3/notifications/bridge.go index 81b5409cc..3e22bde11 100644 --- a/vendor/github.com/distribution/distribution/v3/notifications/bridge.go +++ b/vendor/github.com/distribution/distribution/v3/notifications/bridge.go @@ -6,9 +6,9 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/internal/requestutil" + "github.com/distribution/distribution/v3/internal/uuid" "github.com/distribution/reference" events "github.com/docker/go-events" - "github.com/google/uuid" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/distribution/distribution/v3/notifications/endpoint.go b/vendor/github.com/distribution/distribution/v3/notifications/endpoint.go index f67e8cfe7..ad2227517 100644 --- a/vendor/github.com/distribution/distribution/v3/notifications/endpoint.go +++ b/vendor/github.com/distribution/distribution/v3/notifications/endpoint.go @@ -1,6 +1,7 @@ package notifications import ( + "maps" "net/http" "time" @@ -92,7 +93,5 @@ func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { *em = e.metrics.EndpointMetrics // Map still need to copied in a threadsafe manner. 
em.Statuses = make(map[string]int) - for k, v := range e.metrics.Statuses { - em.Statuses[k] = v - } + maps.Copy(em.Statuses, e.metrics.Statuses) } diff --git a/vendor/github.com/distribution/distribution/v3/notifications/event.go b/vendor/github.com/distribution/distribution/v3/notifications/event.go index 98dd9faf0..fff87966e 100644 --- a/vendor/github.com/distribution/distribution/v3/notifications/event.go +++ b/vendor/github.com/distribution/distribution/v3/notifications/event.go @@ -45,7 +45,7 @@ type Event struct { ID string `json:"id,omitempty"` // Timestamp is the time at which the event occurred. - Timestamp time.Time `json:"timestamp,omitempty"` + Timestamp time.Time `json:"timestamp"` // Action indicates what action encompasses the provided event. Action string `json:"action,omitempty"` @@ -75,19 +75,19 @@ type Event struct { // References provides the references descriptors. References []v1.Descriptor `json:"references,omitempty"` - } `json:"target,omitempty"` + } `json:"target"` // Request covers the request that generated the event. - Request RequestRecord `json:"request,omitempty"` + Request RequestRecord `json:"request"` // Actor specifies the agent that initiated the event. For most // situations, this could be from the authorization context of the request. - Actor ActorRecord `json:"actor,omitempty"` + Actor ActorRecord `json:"actor"` // Source identifies the registry node that generated the event. Put // differently, while the actor "initiates" the event, the source // "generates" it. - Source SourceRecord `json:"source,omitempty"` + Source SourceRecord `json:"source"` } // ActorRecord specifies the agent that initiated the event. 
For most diff --git a/vendor/github.com/distribution/distribution/v3/notifications/listener.go b/vendor/github.com/distribution/distribution/v3/notifications/listener.go index 8a36067ef..31ae0bdff 100644 --- a/vendor/github.com/distribution/distribution/v3/notifications/listener.go +++ b/vendor/github.com/distribution/distribution/v3/notifications/listener.go @@ -167,7 +167,9 @@ func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (i func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { + // Use a detached context for Stat() since the HTTP request context may be canceled + // after ServeBlob completes, but we still want to send the notification. + if desc, err := bsl.Stat(context.WithoutCancel(ctx), dgst); err != nil { dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { diff --git a/vendor/github.com/distribution/distribution/v3/notifications/metrics.go b/vendor/github.com/distribution/distribution/v3/notifications/metrics.go index 238fdb004..0495eb7ef 100644 --- a/vendor/github.com/distribution/distribution/v3/notifications/metrics.go +++ b/vendor/github.com/distribution/distribution/v3/notifications/metrics.go @@ -39,11 +39,15 @@ func init() { var notifications expvar.Map notifications.Init() - notifications.Set("endpoints", expvar.Func(func() interface{} { + notifications.Set("endpoints", expvar.Func(func() any { endpoints.mu.Lock() defer endpoints.mu.Unlock() - var names []interface{} + var names []any + if len(endpoints.registered) == 0 { + return names + } + names = make([]any, 0, len(endpoints.registered)) for _, v := range endpoints.registered { var epjson struct { Name string `json:"name"` diff --git 
a/vendor/github.com/distribution/distribution/v3/notifications/sinks.go b/vendor/github.com/distribution/distribution/v3/notifications/sinks.go index 9334a0875..7caf9be01 100644 --- a/vendor/github.com/distribution/distribution/v3/notifications/sinks.go +++ b/vendor/github.com/distribution/distribution/v3/notifications/sinks.go @@ -128,7 +128,7 @@ type ignoredSink struct { } func newIgnoredSink(sink events.Sink, ignored []string, ignoreActions []string) events.Sink { - if len(ignored) == 0 { + if len(ignored) == 0 && len(ignoreActions) == 0 { return sink } diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go index a8b52c188..31ed53848 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go +++ b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/errors.go @@ -26,7 +26,7 @@ func (ec ErrorCode) ErrorCode() ErrorCode { // Error returns the ID/Value func (ec ErrorCode) Error() string { // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) + return strings.ToLower(strings.ReplaceAll(ec.String(), "_", " ")) } // Descriptor returns the descriptor for the error code. 
@@ -80,7 +80,7 @@ func (ec ErrorCode) WithMessage(message string) Error { // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { +func (ec ErrorCode) WithDetail(detail any) Error { return Error{ Code: ec, Message: ec.Message(), @@ -88,7 +88,7 @@ func (ec ErrorCode) WithDetail(detail interface{}) Error { } // WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { +func (ec ErrorCode) WithArgs(args ...any) Error { return Error{ Code: ec, Message: ec.Message(), @@ -97,9 +97,9 @@ func (ec ErrorCode) WithArgs(args ...interface{}) Error { // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail any `json:"detail,omitempty"` // TODO(duglin): See if we need an "args" property so we can do the // variable substitution right before showing the message to the user @@ -119,7 +119,7 @@ func (e Error) Error() string { // WithDetail will return a new Error, based on the current one, but with // some Detail info added -func (e Error) WithDetail(detail interface{}) Error { +func (e Error) WithDetail(detail any) Error { return Error{ Code: e.Code, Message: e.Message, @@ -129,7 +129,7 @@ func (e Error) WithDetail(detail interface{}) Error { // WithArgs uses the passed-in list of interface{} as the substitution // variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { +func (e Error) WithArgs(args ...any) Error { return Error{ Code: e.Code, Message: fmt.Sprintf(e.Code.Message(), args...), @@ -184,11 +184,12 @@ func (errs Errors) Error() string { case 1: return errs[0].Error() default: - msg := "errors:\n" + var msg 
strings.Builder + msg.WriteString("errors:\n") for _, err := range errs { - msg += err.Error() + "\n" + msg.WriteString(err.Error() + "\n") } - return msg + return msg.String() } } diff --git a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go index 6030c36a5..3fc71a5ed 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go +++ b/vendor/github.com/distribution/distribution/v3/registry/api/errcode/register.go @@ -268,8 +268,7 @@ func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } // GetGroupNames returns the list of Error group names that are registered func GetGroupNames() []string { - keys := []string{} - + keys := make([]string, 0, len(groupToDescriptors)) for k := range groupToDescriptors { keys = append(keys, k) } @@ -287,9 +286,9 @@ func GetErrorCodeGroup(name string) []ErrorDescriptor { // GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are // registered, irrespective of what group they're in func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { + groups := GetGroupNames() + result := make([]ErrorDescriptor, 0, len(groups)) + for _, group := range groups { result = append(result, GetErrorCodeGroup(group)...) } sort.Sort(byValue(result)) diff --git a/vendor/github.com/distribution/distribution/v3/registry/auth/auth.go b/vendor/github.com/distribution/distribution/v3/registry/auth/auth.go index 687002c3b..954496676 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/auth/auth.go +++ b/vendor/github.com/distribution/distribution/v3/registry/auth/auth.go @@ -47,7 +47,7 @@ var ( // InitFunc is the type of an AccessController factory function and is used // to register the constructor for different AccessController backends. 
-type InitFunc func(options map[string]interface{}) (AccessController, error) +type InitFunc func(options map[string]any) (AccessController, error) var accessControllers map[string]InitFunc @@ -128,7 +128,7 @@ func Register(name string, initFunc InitFunc) error { // GetAccessController constructs an AccessController // with the given options using the named backend. -func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { +func GetAccessController(name string, options map[string]any) (AccessController, error) { if initFunc, exists := accessControllers[name]; exists { return initFunc(options) } diff --git a/vendor/github.com/distribution/distribution/v3/registry/handlers/app.go b/vendor/github.com/distribution/distribution/v3/registry/handlers/app.go index 301fc9706..bae748438 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/handlers/app.go +++ b/vendor/github.com/distribution/distribution/v3/registry/handlers/app.go @@ -131,13 +131,13 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { purgeConfig := uploadPurgeDefaultConfig() if mc, ok := config.Storage["maintenance"]; ok { if v, ok := mc["uploadpurging"]; ok { - purgeConfig, ok = v.(map[interface{}]interface{}) + purgeConfig, ok = v.(map[any]any) if !ok { panic("uploadpurging config key must contain additional keys") } } if v, ok := mc["readonly"]; ok { - readOnly, ok := v.(map[interface{}]interface{}) + readOnly, ok := v.(map[any]any) if !ok { panic("readonly config key must contain additional keys") } @@ -464,7 +464,7 @@ func (app *App) register(routeName string, dispatch dispatchFunc) { // Chain the handler with prometheus instrumented handler if app.Config.HTTP.Debug.Prometheus.Enabled { namespace := metrics.NewNamespace(prometheus.NamespacePrefix, "http", nil) - httpMetrics := namespace.NewDefaultHttpMetrics(strings.Replace(routeName, "-", "_", -1)) + httpMetrics := 
namespace.NewDefaultHttpMetrics(strings.ReplaceAll(routeName, "-", "_")) metrics.Register(namespace) handler = metrics.InstrumentHandler(httpMetrics, handler) } @@ -536,18 +536,58 @@ func (app *App) configureRedis(cfg *configuration.Configuration) { return } + opts := redis.UniversalOptions{ + Addrs: cfg.Redis.Options.Addrs, + ClientName: cfg.Redis.Options.ClientName, + DB: cfg.Redis.Options.DB, + Protocol: cfg.Redis.Options.Protocol, + Username: cfg.Redis.Options.Username, + Password: cfg.Redis.Options.Password, + SentinelUsername: cfg.Redis.Options.SentinelUsername, + SentinelPassword: cfg.Redis.Options.SentinelPassword, + MaxRetries: cfg.Redis.Options.MaxRetries, + MinRetryBackoff: cfg.Redis.Options.MinRetryBackoff, + MaxRetryBackoff: cfg.Redis.Options.MaxRetryBackoff, + DialTimeout: cfg.Redis.Options.DialTimeout, + ReadTimeout: cfg.Redis.Options.ReadTimeout, + WriteTimeout: cfg.Redis.Options.WriteTimeout, + ContextTimeoutEnabled: cfg.Redis.Options.ContextTimeoutEnabled, + PoolFIFO: cfg.Redis.Options.PoolFIFO, + PoolSize: cfg.Redis.Options.PoolSize, + PoolTimeout: cfg.Redis.Options.PoolTimeout, + MinIdleConns: cfg.Redis.Options.MinIdleConns, + MaxIdleConns: cfg.Redis.Options.MaxIdleConns, + MaxActiveConns: cfg.Redis.Options.MaxActiveConns, + ConnMaxIdleTime: cfg.Redis.Options.ConnMaxIdleTime, + ConnMaxLifetime: cfg.Redis.Options.ConnMaxLifetime, + MaxRedirects: cfg.Redis.Options.MaxRedirects, + ReadOnly: cfg.Redis.Options.ReadOnly, + RouteByLatency: cfg.Redis.Options.RouteByLatency, + RouteRandomly: cfg.Redis.Options.RouteRandomly, + MasterName: cfg.Redis.Options.MasterName, + DisableIdentity: cfg.Redis.Options.DisableIdentity, + IdentitySuffix: cfg.Redis.Options.IdentitySuffix, + UnstableResp3: cfg.Redis.Options.UnstableResp3, + } + // redis TLS config - if cfg.Redis.TLS.Certificate != "" || cfg.Redis.TLS.Key != "" { + if cfg.Redis.TLS.Certificate != "" || cfg.Redis.TLS.Key != "" || len(cfg.Redis.TLS.RootCAs) != 0 { + if (cfg.Redis.TLS.Certificate == "") != 
(cfg.Redis.TLS.Key == "") { + dcontext.GetLogger(app).Warn("redis TLS client certificate configuration is incomplete; both redis.tls.certificate and redis.tls.key must be set to enable mTLS, continuing without client certificates") + } + var err error tlsConf := &tls.Config{} - tlsConf.Certificates = make([]tls.Certificate, 1) - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(cfg.Redis.TLS.Certificate, cfg.Redis.TLS.Key) - if err != nil { - panic(err) + if cfg.Redis.TLS.Certificate != "" && cfg.Redis.TLS.Key != "" { + tlsConf.Certificates = make([]tls.Certificate, 1) + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(cfg.Redis.TLS.Certificate, cfg.Redis.TLS.Key) + if err != nil { + panic(err) + } } - if len(cfg.Redis.TLS.ClientCAs) != 0 { + if len(cfg.Redis.TLS.RootCAs) != 0 { pool := x509.NewCertPool() - for _, ca := range cfg.Redis.TLS.ClientCAs { + for _, ca := range cfg.Redis.TLS.RootCAs { caPem, err := os.ReadFile(ca) if err != nil { dcontext.GetLogger(app).Errorf("failed reading redis client CA: %v", err) @@ -559,13 +599,12 @@ func (app *App) configureRedis(cfg *configuration.Configuration) { return } } - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool + tlsConf.RootCAs = pool } - cfg.Redis.Options.TLSConfig = tlsConf + opts.TLSConfig = tlsConf } - app.redis = app.createPool(cfg.Redis.Options) + app.redis = app.createPool(opts) // Enable metrics instrumentation. 
if err := redisotel.InstrumentMetrics(app.redis); err != nil { @@ -578,9 +617,9 @@ func (app *App) configureRedis(cfg *configuration.Configuration) { registry = expvar.NewMap("registry") } - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + registry.(*expvar.Map).Set("redis", expvar.Func(func() any { stats := app.redis.PoolStats() - return map[string]interface{}{ + return map[string]any{ "Config": cfg, "Active": stats.TotalConns - stats.IdleConns, } @@ -1014,8 +1053,8 @@ func applyStorageMiddleware(ctx context.Context, driver storagedriver.StorageDri // uploadPurgeDefaultConfig provides a default configuration for upload // purging to be used in the absence of configuration in the // configuration file -func uploadPurgeDefaultConfig() map[interface{}]interface{} { - config := map[interface{}]interface{}{} +func uploadPurgeDefaultConfig() map[any]any { + config := map[any]any{} config["enabled"] = true config["age"] = "168h" config["interval"] = "24h" @@ -1029,7 +1068,7 @@ func badPurgeUploadConfig(reason string) { // startUploadPurger schedules a goroutine which will periodically // check upload directories for old files and delete them -func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log dcontext.Logger, config map[interface{}]interface{}) { +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log dcontext.Logger, config map[any]any) { if config["enabled"] == false { return } diff --git a/vendor/github.com/distribution/distribution/v3/registry/handlers/context.go b/vendor/github.com/distribution/distribution/v3/registry/handlers/context.go index c272095c8..acc84223d 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/handlers/context.go +++ b/vendor/github.com/distribution/distribution/v3/registry/handlers/context.go @@ -42,7 +42,7 @@ type Context struct { // Value overrides context.Context.Value to ensure that calls are routed to // correct context. 
-func (ctx *Context) Value(key interface{}) interface{} { +func (ctx *Context) Value(key any) any { return ctx.Context.Value(key) } @@ -117,7 +117,7 @@ type userInfoContext struct { user auth.UserInfo } -func (uic userInfoContext) Value(key interface{}) interface{} { +func (uic userInfoContext) Value(key any) any { switch key { case userKey: return uic.user @@ -143,7 +143,7 @@ type resourceContext struct { type resourceKey struct{} -func (rc resourceContext) Value(key interface{}) interface{} { +func (rc resourceContext) Value(key any) any { if key == (resourceKey{}) { return rc.resources } diff --git a/vendor/github.com/distribution/distribution/v3/registry/handlers/helpers.go b/vendor/github.com/distribution/distribution/v3/registry/handlers/helpers.go index 3ccba5558..b1bdea34d 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/handlers/helpers.go +++ b/vendor/github.com/distribution/distribution/v3/registry/handlers/helpers.go @@ -50,7 +50,7 @@ func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r // instead of showing 0 for the HTTP status. 
responseWriter.WriteHeader(499) - dcontext.GetLoggerWithFields(ctx, map[interface{}]interface{}{ + dcontext.GetLoggerWithFields(ctx, map[any]any{ "error": err, "copied": copied, "contentLength": r.ContentLength, diff --git a/vendor/github.com/distribution/distribution/v3/registry/handlers/hooks.go b/vendor/github.com/distribution/distribution/v3/registry/handlers/hooks.go index 76b39aedf..34106aaa6 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/handlers/hooks.go +++ b/vendor/github.com/distribution/distribution/v3/registry/handlers/hooks.go @@ -43,7 +43,7 @@ func (hook *logHook) Fire(entry *logrus.Entry) error { // Levels contains hook levels to be catched func (hook *logHook) Levels() []logrus.Level { - levels := []logrus.Level{} + levels := make([]logrus.Level, 0, len(hook.LevelsParam)) for _, v := range hook.LevelsParam { lv, _ := logrus.ParseLevel(v) levels = append(levels, lv) diff --git a/vendor/github.com/distribution/distribution/v3/registry/handlers/manifests.go b/vendor/github.com/distribution/distribution/v3/registry/handlers/manifests.go index 6b7648c2b..c37188335 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/handlers/manifests.go +++ b/vendor/github.com/distribution/distribution/v3/registry/handlers/manifests.go @@ -5,6 +5,7 @@ import ( "fmt" "mime" "net/http" + "slices" "strings" "sync" @@ -95,7 +96,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 - for _, mediaType := range strings.Split(acceptHeader, ",") { + for mediaType := range strings.SplitSeq(acceptHeader, ",") { if mediaType, _, err = mime.ParseMediaType(mediaType); err != nil { continue } @@ -153,9 +154,10 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if _, isOCImanifest := 
manifest.(*ocischema.DeserializedManifest); isOCImanifest { manifestType = ociSchema } else if isManifestList { - if manifestList.MediaType == manifestlist.MediaTypeManifestList { + switch manifestList.MediaType { + case manifestlist.MediaTypeManifestList: manifestType = manifestlistSchema - } else if manifestList.MediaType == v1.MediaTypeImageIndex { + case v1.MediaTypeImageIndex: manifestType = ociImageIndexSchema } } @@ -217,6 +219,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) if r.Method == http.MethodHead { + w.WriteHeader(http.StatusOK) return } @@ -393,11 +396,8 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) // Check to see if class is allowed in registry var allowedClass bool - for _, c := range allowedClasses { - if class == c { - allowedClass = true - break - } + if slices.Contains(allowedClasses, class) { + allowedClass = true } if !allowedClass { return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("registry does not allow %s manifest", class)) @@ -491,7 +491,6 @@ func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Reques g := errgroup.Group{} g.SetLimit(storage.DefaultConcurrencyLimit) for _, tag := range referencedTags { - tag := tag g.Go(func() error { if err := tagService.Untag(imh, tag); err != nil { diff --git a/vendor/github.com/distribution/distribution/v3/registry/handlers/tags.go b/vendor/github.com/distribution/distribution/v3/registry/handlers/tags.go index bfd56fc62..4d88a74ee 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/handlers/tags.go +++ b/vendor/github.com/distribution/distribution/v3/registry/handlers/tags.go @@ -2,8 +2,8 @@ package handlers import ( "encoding/json" + "io" "net/http" - "sort" "strconv" "github.com/distribution/distribution/v3" @@ -34,71 +34,69 @@ type tagsAPIResponse struct { // GetTags returns a json list of tags for a specific image name. 
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { - tagService := th.Repository.Tags(th) - tags, err := tagService.All(th) - if err != nil { - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - th.Errors = append(th.Errors, errcode.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) - case errcode.Error: - th.Errors = append(th.Errors, err) - default: - th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } + var moreEntries = true - // do pagination if requested q := r.URL.Query() - // get entries after latest, if any specified - if lastEntry := q.Get("last"); lastEntry != "" { - lastEntryIndex := sort.SearchStrings(tags, lastEntry) + lastEntry := q.Get("last") - // as`sort.SearchStrings` can return len(tags), if the - // specified `lastEntry` is not found, we need to - // ensure it does not panic when slicing. - if lastEntryIndex == len(tags) { - tags = []string{} - } else { - tags = tags[lastEntryIndex+1:] - } - } + limit := -1 - // if no error, means that the user requested `n` entries + // parse n, if n unparseable, or negative assign it to defaultReturnedEntries if n := q.Get("n"); n != "" { - maxEntries, err := strconv.Atoi(n) - if err != nil || maxEntries < 0 { - th.Errors = append(th.Errors, errcode.ErrorCodePaginationNumberInvalid.WithDetail(map[string]string{"n": n})) + if th.App.Config.Tags.MaxTags > 0 { + limit = th.App.Config.Tags.MaxTags + } + parsedMax, err := strconv.Atoi(n) + if err != nil || (limit > 0 && parsedMax > limit) || parsedMax < 0 { + th.Errors = append(th.Errors, errcode.ErrorCodePaginationNumberInvalid.WithDetail(map[string]int{"n": parsedMax})) return } + limit = parsedMax + } - // if there is requested more than or - // equal to the amount of tags we have, - // then set the request to equal `len(tags)`. - // the reason for the `=`, is so the else - // clause will only activate if there - // are tags left the user needs. 
- if maxEntries >= len(tags) { - maxEntries = len(tags) - } else if maxEntries > 0 { - // defined in `catalog.go` - urlStr, err := createLinkEntry(r.URL.String(), maxEntries, tags[maxEntries-1]) - if err != nil { - th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + filled := make([]string, 0) + + if limit == 0 { + moreEntries = false + } else { + tagService := th.Repository.Tags(th) + // if limit is -1, we want to list all the tags, and receive a io.EOF error + returnedTags, err := tagService.List(th.Context, limit, lastEntry) + if err != nil { + if err != io.EOF { + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + th.Errors = append(th.Errors, errcode.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) + case errcode.Error: + th.Errors = append(th.Errors, err) + default: + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } return } - w.Header().Set("Link", urlStr) + // err is either io.EOF + moreEntries = false } - - tags = tags[:maxEntries] + filled = returnedTags } w.Header().Set("Content-Type", "application/json") + // Add a link header if there are more entries to retrieve + if moreEntries { + lastEntry = filled[len(filled)-1] + urlStr, err := createLinkEntry(r.URL.String(), limit, lastEntry) + if err != nil { + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + enc := json.NewEncoder(w) if err := enc.Encode(tagsAPIResponse{ Name: th.Repository.Named().Name(), - Tags: tags, + Tags: filled, }); err != nil { th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return diff --git a/vendor/github.com/distribution/distribution/v3/registry/listener/listener.go b/vendor/github.com/distribution/distribution/v3/registry/listener/listener.go index 5b4330302..5142f351a 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/listener/listener.go +++ 
b/vendor/github.com/distribution/distribution/v3/registry/listener/listener.go @@ -40,7 +40,7 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { func NewListener(net, laddr string) (net.Listener, error) { listeners, err := activation.Listeners() if err != nil { - return nil, fmt.Errorf("Socket activation failed: %v", err) + return nil, fmt.Errorf("socket activation failed: %v", err) } switch len(listeners) { case 0: @@ -57,7 +57,7 @@ func NewListener(net, laddr string) (net.Listener, error) { log.Info("Using systemd socket activation instead of any configured network listeners") return listeners[0], nil default: - return nil, fmt.Errorf("Found %d socket-activation listeners, only expected 1", len(listeners)) + return nil, fmt.Errorf("found %d socket-activation listeners, only expected 1", len(listeners)) } } diff --git a/vendor/github.com/distribution/distribution/v3/registry/middleware/registry/middleware.go b/vendor/github.com/distribution/distribution/v3/registry/middleware/registry/middleware.go index 16552a319..538210645 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/middleware/registry/middleware.go +++ b/vendor/github.com/distribution/distribution/v3/registry/middleware/registry/middleware.go @@ -11,7 +11,7 @@ import ( // InitFunc is the type of a RegistryMiddleware factory function and is // used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(ctx context.Context, registry distribution.Namespace, driver storagedriver.StorageDriver, options map[string]interface{}) (distribution.Namespace, error) +type InitFunc func(ctx context.Context, registry distribution.Namespace, driver storagedriver.StorageDriver, options map[string]any) (distribution.Namespace, error) var ( middlewares map[string]InitFunc @@ -34,7 +34,7 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a RegistryMiddleware with the given options using the named backend. 
-func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace, driver storagedriver.StorageDriver) (distribution.Namespace, error) { +func Get(ctx context.Context, name string, options map[string]any, registry distribution.Namespace, driver storagedriver.StorageDriver) (distribution.Namespace, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { return initFunc(ctx, registry, driver, options) diff --git a/vendor/github.com/distribution/distribution/v3/registry/middleware/repository/middleware.go b/vendor/github.com/distribution/distribution/v3/registry/middleware/repository/middleware.go index 8c1cc8ef7..ca136d9f6 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/middleware/repository/middleware.go +++ b/vendor/github.com/distribution/distribution/v3/registry/middleware/repository/middleware.go @@ -9,7 +9,7 @@ import ( // InitFunc is the type of a RepositoryMiddleware factory function and is // used to register the constructor for different RepositoryMiddleware backends. -type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) +type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]any) (distribution.Repository, error) var middlewares map[string]InitFunc @@ -29,7 +29,7 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a RepositoryMiddleware with the given options using the named backend. 
-func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { +func Get(ctx context.Context, name string, options map[string]any, repository distribution.Repository) (distribution.Repository, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { return initFunc(ctx, repository, options) diff --git a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyauth.go b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyauth.go index adf60cb9c..23b1e90de 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyauth.go +++ b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyauth.go @@ -1,6 +1,7 @@ package proxy import ( + "net" "net/http" "net/url" "strings" @@ -8,6 +9,7 @@ import ( "github.com/distribution/distribution/v3/internal/client/auth" "github.com/distribution/distribution/v3/internal/client/auth/challenge" "github.com/distribution/distribution/v3/internal/dcontext" + "golang.org/x/net/publicsuffix" ) const challengeHeader = "Docker-Distribution-Api-Version" @@ -66,6 +68,11 @@ func configureAuth(username, password, remoteURL string) (auth.CredentialStore, func getAuthURLs(remoteURL string) ([]string, error) { authURLs := []string{} + remote, err := url.Parse(remoteURL) + if err != nil { + return nil, err + } + resp, err := http.Get(remoteURL + "/v2/") if err != nil { return nil, err @@ -73,7 +80,7 @@ func getAuthURLs(remoteURL string) ([]string, error) { defer resp.Body.Close() for _, c := range challenge.ResponseChallenges(resp) { - if strings.EqualFold(c.Scheme, "bearer") { + if strings.EqualFold(c.Scheme, "bearer") && realmAllowed(remote, c.Parameters["realm"]) { authURLs = append(authURLs, c.Parameters["realm"]) } } @@ -81,6 +88,49 @@ func getAuthURLs(remoteURL string) ([]string, error) { return authURLs, nil } +func realmAllowed(remote *url.URL, realm string) bool { + realmURL, 
err := url.Parse(realm) + if err != nil { + return false + } + if realmURL.Host == "" || remote == nil || remote.Host == "" { + return false + } + + if strings.EqualFold(remote.Host, realmURL.Host) { + return true + } + + remoteHost := strings.ToLower(remote.Hostname()) + realmHost := strings.ToLower(realmURL.Hostname()) + if remoteHost == "" || realmHost == "" { + return false + } + + if isLiteralOrLocal(remoteHost) || isLiteralOrLocal(realmHost) { + return false + } + + return strings.EqualFold(registrableDomain(remoteHost), registrableDomain(realmHost)) +} + +func isLiteralOrLocal(host string) bool { + if host == "localhost" { + return true + } + + return net.ParseIP(host) != nil +} + +func registrableDomain(host string) string { + domain, err := publicsuffix.EffectiveTLDPlusOne(host) + if err != nil { + return "" + } + + return domain +} + func ping(manager challenge.Manager, endpoint, versionHeader string) error { resp, err := http.Get(endpoint) if err != nil { diff --git a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyblobstore.go b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyblobstore.go index 7726f4280..5fca10456 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyblobstore.go +++ b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyblobstore.go @@ -18,12 +18,13 @@ import ( ) type proxyBlobStore struct { - localStore distribution.BlobStore - remoteStore distribution.BlobService - scheduler *scheduler.TTLExpirationScheduler - ttl *time.Duration - repositoryName reference.Named - authChallenger authChallenger + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler + ttl *time.Duration + cacheWriteTimeout time.Duration + repositoryName reference.Named + authChallenger authChallenger } var _ distribution.BlobStore = &proxyBlobStore{} @@ -113,11 +114,28 @@ func (pbs *proxyBlobStore) ServeBlob(ctx 
context.Context, w http.ResponseWriter, mu.Unlock() }() - bw, err := pbs.localStore.Create(ctx) + // Create a detached context for the blob writer that won't be canceled + // when the HTTP request context is canceled. This allows the cache write + // to complete even if the client disconnects. + // Use the configured timeout to prevent hanging operations. + writerCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), pbs.cacheWriteTimeout) + defer cancel() + + bw, err := pbs.localStore.Create(writerCtx) if err != nil { return err } + committed := false + // Ensure the writer is canceled if we return early with an error + defer func() { + if !committed { + if err := bw.Cancel(writerCtx); err != nil { + dcontext.GetLogger(ctx).WithError(err).Errorf("Error canceling blob writer") + } + } + }() + // Serving client and storing locally over same fetching request. // This can prevent a redundant blob fetching. multiWriter := io.MultiWriter(w, bw) @@ -126,11 +144,13 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, return err } - _, err = bw.Commit(ctx, desc) + _, err = bw.Commit(writerCtx, desc) if err != nil { return err } + committed = true + blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) if err != nil { dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err) diff --git a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxymetrics.go b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxymetrics.go index 0b492fe12..660618f0e 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxymetrics.go +++ b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxymetrics.go @@ -53,11 +53,11 @@ func init() { registry.(*expvar.Map).Set("proxy", pm) } - pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + pm.(*expvar.Map).Set("blobs", expvar.Func(func() any { return proxyMetrics.blobMetrics })) - pm.(*expvar.Map).Set("manifests", 
expvar.Func(func() interface{} { + pm.(*expvar.Map).Set("manifests", expvar.Func(func() any { return proxyMetrics.manifestMetrics })) diff --git a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyregistry.go b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyregistry.go index d353af3ff..e85663e3c 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyregistry.go +++ b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxyregistry.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "net/url" + "strings" "sync" "time" @@ -26,12 +27,13 @@ var repositoryTTL = 24 * 7 * time.Hour // proxyingRegistry fetches content from a remote registry and caches it locally type proxyingRegistry struct { - embedded distribution.Namespace // provides local registry functionality - scheduler *scheduler.TTLExpirationScheduler - ttl *time.Duration - remoteURL url.URL - authChallenger authChallenger - basicAuth auth.CredentialStore + embedded distribution.Namespace // provides local registry functionality + scheduler *scheduler.TTLExpirationScheduler + ttl *time.Duration + cacheWriteTimeout time.Duration + remoteURL url.URL + authChallenger authChallenger + basicAuth auth.CredentialStore } // NewRegistryPullThroughCache creates a registry acting as a pull through cache @@ -55,6 +57,12 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name ttl = nil } + // Set default cache write timeout if not specified + cacheWriteTimeout := 5 * time.Minute + if config.CacheWriteTimeout != nil && *config.CacheWriteTimeout > 0 { + cacheWriteTimeout = *config.CacheWriteTimeout + } + if ttl != nil { s = scheduler.New(ctx, driver, "/scheduler-state.json") s.OnBlobExpire(func(ref reference.Reference) error { @@ -128,10 +136,11 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name } return &proxyingRegistry{ - embedded: registry, - scheduler: s, - ttl: ttl, - remoteURL: *remoteURL, + 
embedded: registry, + scheduler: s, + ttl: ttl, + cacheWriteTimeout: cacheWriteTimeout, + remoteURL: *remoteURL, authChallenger: &remoteAuthChallenger{ remoteURL: *remoteURL, cm: challenge.NewSimpleManager(), @@ -190,12 +199,13 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named return &proxiedRepository{ blobStore: &proxyBlobStore{ - localStore: localRepo.Blobs(ctx), - remoteStore: remoteRepo.Blobs(ctx), - scheduler: pr.scheduler, - ttl: pr.ttl, - repositoryName: name, - authChallenger: pr.authChallenger, + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + ttl: pr.ttl, + cacheWriteTimeout: pr.cacheWriteTimeout, + repositoryName: name, + authChallenger: pr.authChallenger, }, manifests: &proxyManifestStore{ repositoryName: name, @@ -229,6 +239,9 @@ type Closer interface { } func (pr *proxyingRegistry) Close() error { + if pr.scheduler == nil { + return nil + } return pr.scheduler.Stop() } @@ -251,7 +264,9 @@ func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { } func (r *remoteAuthChallenger) challengeManager() challenge.Manager { - return r.cm + return challenge.NewFilteringManager(r.cm, func(c challenge.Challenge) bool { + return !strings.EqualFold(c.Scheme, "bearer") || realmAllowed(&r.remoteURL, c.Parameters["realm"]) + }) } // tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist @@ -274,8 +289,8 @@ func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { return err } + dcontext.GetLogger(ctx).Infof("Challenge established with upstream: %s", remoteURL.Redacted()) - dcontext.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) return nil } diff --git a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxytagservice.go 
b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxytagservice.go index a55f22bb1..ec39c8a83 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/proxy/proxytagservice.go +++ b/vendor/github.com/distribution/distribution/v3/registry/proxy/proxytagservice.go @@ -65,3 +65,7 @@ func (pt proxyTagService) All(ctx context.Context) ([]string, error) { func (pt proxyTagService) Lookup(ctx context.Context, digest v1.Descriptor) ([]string, error) { return []string{}, distribution.ErrUnsupported } + +func (pt proxyTagService) List(ctx context.Context, limit int, last string) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/vendor/github.com/distribution/distribution/v3/registry/registry.go b/vendor/github.com/distribution/distribution/v3/registry/registry.go index 29fc1c401..6801e4c57 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/registry.go +++ b/vendor/github.com/distribution/distribution/v3/registry/registry.go @@ -416,7 +416,7 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) logrus.Debugf("using %q logging formatter", formatter) if len(config.Log.Fields) > 0 { // build up the static fields, if present. - var fields []interface{} + var fields []any for k := range config.Log.Fields { fields = append(fields, k) } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter.go b/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter.go index e3c7fe615..afa0ac804 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter.go @@ -267,7 +267,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc v1.Descriptor) (v1. if !verified { dcontext.GetLoggerWithFields(ctx, - map[interface{}]interface{}{ + map[any]any{ "canonical": canonical, "provided": desc.Digest, }, "canonical", "provided"). 
diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_nonresumable.go b/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_nonresumable.go index b3b3f6abe..bb2f7789a 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_nonresumable.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_nonresumable.go @@ -7,7 +7,7 @@ import ( "context" ) -// resumeHashAt is a noop when resumable digest support is disabled. +// resumeDigest is a noop when resumable digest support is disabled. func (bw *blobWriter) resumeDigest(ctx context.Context) error { return errResumableDigestNotAvailable } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_resumable.go b/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_resumable.go index 5c55d5403..6c7836385 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_resumable.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/blobwriter_resumable.go @@ -1,5 +1,4 @@ //go:build !noresumabledigest -// +build !noresumabledigest package storage diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/redis/redis.go b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/redis/redis.go index 3aee5c9a8..1db8b8ee0 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/cache/redis/redis.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/cache/redis/redis.go @@ -214,8 +214,10 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Cont return err } + pool := rsrbds.upstream.pool + // Check membership to repository first - member, err := rsrbds.upstream.pool.SIsMember(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result() + member, err := pool.SIsMember(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), 
dgst.String()).Result() if err != nil { return err } @@ -223,7 +225,13 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Cont return distribution.ErrBlobUnknown } - return rsrbds.upstream.Clear(ctx, dgst) + pipe := pool.TxPipeline() + pipe.SRem(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()) + pipe.Del(ctx, rsrbds.blobDescriptorHashKey(dgst)) + pipe.HDel(ctx, rsrbds.upstream.blobDescriptorHashKey(dgst), "digest", "size", "mediatype") + + _, err = pipe.Exec(ctx) + return err } func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error { diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/catalog.go b/vendor/github.com/distribution/distribution/v3/registry/storage/catalog.go index 08f9288ea..bfd659c17 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/catalog.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/catalog.go @@ -19,7 +19,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri foundRepos := 0 if len(repos) == 0 { - return 0, errors.New("Attempted to list 0 repositories") + return 0, errors.New("attempted to list 0 repositories") } root, err := pathFor(repositoriesRootPathSpec{}) @@ -107,10 +107,7 @@ func compareReplaceInline(s1, s2 string, old, new byte) int { // the exact same slice header. It will make the code unsafe but can // provide some extra performance. 
- l := len(s1) - if len(s2) < l { - l = len(s2) - } + l := min(len(s2), len(s1)) for i := 0; i < l; i++ { c1, c2 := s1[i], s2[i] diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/base/regulator.go b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/base/regulator.go index 2cf7a3ece..184df07d3 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/base/regulator.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/base/regulator.go @@ -24,7 +24,7 @@ type regulator struct { // concurrent calls given a minimum limit and default. // // If the parameter supplied is of an invalid type this returns an error. -func GetLimitFromParameter(param interface{}, min, def uint64) (uint64, error) { +func GetLimitFromParameter(param any, min, def uint64) (uint64, error) { limit := def switch v := param.(type) { diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/factory/factory.go b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/factory/factory.go index f52684b76..fca9a95be 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/factory/factory.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/factory/factory.go @@ -24,7 +24,7 @@ type StorageDriverFactory interface { // Create returns a new storagedriver.StorageDriver with the given parameters // Parameters will vary by driver and may be ignored // Each parameter key must only consist of lowercase letters and numbers - Create(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error) + Create(ctx context.Context, parameters map[string]any) (storagedriver.StorageDriver, error) } // Register makes a storage driver available by the provided name. @@ -47,7 +47,7 @@ func Register(name string, factory StorageDriverFactory) { // parameters. 
To use a driver, the StorageDriverFactory must first be // registered with the given name. If no drivers are found, an // InvalidStorageDriverError is returned -func Create(ctx context.Context, name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { +func Create(ctx context.Context, name string, parameters map[string]any) (storagedriver.StorageDriver, error) { driverFactory, ok := driverFactories[name] if !ok { return nil, InvalidStorageDriverError{name} diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/driver.go b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/driver.go index 0256603c1..eeff71d2f 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/driver.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/driver.go @@ -22,7 +22,7 @@ func init() { // inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. 
type inMemoryDriverFactory struct{} -func (factory *inMemoryDriverFactory) Create(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { +func (factory *inMemoryDriverFactory) Create(ctx context.Context, parameters map[string]any) (storagedriver.StorageDriver, error) { return New(), nil } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/mfs.go b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/mfs.go index f3837c5f5..f52bddbf5 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/mfs.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/inmemory/mfs.go @@ -164,8 +164,8 @@ func (d *dir) mkdirs(p string) (*dir, error) { return dd, nil } - components := strings.Split(relative, "/") - for _, component := range components { + components := strings.SplitSeq(relative, "/") + for component := range components { d, err := dd.mkdir(component) if err != nil { // This should actually never happen, since there are no children. 
@@ -302,10 +302,7 @@ func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { if int64(cap(f.data)) < newLen { // Grow slice exponentially to ensure amortized linear time complexity // of reallocation - newCap := int64(float64(cap(f.data)) * reallocExponent) - if newCap < newLen { - newCap = newLen - } + newCap := max(int64(float64(cap(f.data))*reallocExponent), newLen) data := make([]byte, len(f.data), newCap) copy(data, f.data) f.data = data diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/middleware/storagemiddleware.go b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/middleware/storagemiddleware.go index d2c37741e..0369144c9 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/middleware/storagemiddleware.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/middleware/storagemiddleware.go @@ -9,7 +9,7 @@ import ( // InitFunc is the type of a StorageMiddleware factory function and is // used to register the constructor for different StorageMiddleware backends. -type InitFunc func(ctx context.Context, storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) +type InitFunc func(ctx context.Context, storageDriver storagedriver.StorageDriver, options map[string]any) (storagedriver.StorageDriver, error) var storageMiddlewares map[string]InitFunc @@ -29,7 +29,7 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a StorageMiddleware with the given options using the named backend. 
-func Get(ctx context.Context, name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { +func Get(ctx context.Context, name string, options map[string]any, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { if storageMiddlewares != nil { if initFunc, exists := storageMiddlewares[name]; exists { return initFunc(ctx, storageDriver, options) diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/storagedriver.go b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/storagedriver.go index c12f79de3..180a5fd1b 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/driver/storagedriver.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/driver/storagedriver.go @@ -216,11 +216,12 @@ func (e Errors) Error() string { case 1: return fmt.Sprintf("%s: %s", e.DriverName, e.Errs[0].Error()) default: - msg := "errors:\n" + var msg strings.Builder + msg.WriteString("errors:\n") for _, err := range e.Errs { - msg += err.Error() + "\n" + msg.WriteString(err.Error() + "\n") } - return fmt.Sprintf("%s: %s", e.DriverName, msg) + return fmt.Sprintf("%s: %s", e.DriverName, msg.String()) } } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go b/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go index 8b4ae73c7..a95d7f40c 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go @@ -12,7 +12,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -func emit(format string, a ...interface{}) { +func emit(format string, a ...any) { fmt.Printf(format+"\n", a...) 
} diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/linkedblobstore.go b/vendor/github.com/distribution/distribution/v3/registry/storage/linkedblobstore.go index fc5e98c7a..212e82bce 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/linkedblobstore.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/linkedblobstore.go @@ -10,9 +10,9 @@ import ( "github.com/distribution/distribution/v3" "github.com/distribution/distribution/v3/internal/dcontext" + "github.com/distribution/distribution/v3/internal/uuid" "github.com/distribution/distribution/v3/registry/storage/driver" "github.com/distribution/reference" - "github.com/google/uuid" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -102,16 +102,16 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) return desc, lbs.linkBlob(ctx, desc) } -type optionFunc func(interface{}) error +type optionFunc func(any) error -func (f optionFunc) Apply(v interface{}) error { +func (f optionFunc) Apply(v any) error { return f(v) } // WithMountFrom returns a BlobCreateOption which designates that the blob should be // mounted from the given canonical reference. 
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { + return optionFunc(func(v any) error { opts, ok := v.(*distribution.CreateOptions) if !ok { return fmt.Errorf("unexpected options type: %T", v) diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/tagstore.go b/vendor/github.com/distribution/distribution/v3/registry/storage/tagstore.go index 3639e2d9c..74afe862e 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/tagstore.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/tagstore.go @@ -2,8 +2,11 @@ package storage import ( "context" + "errors" + "io" "path" "sort" + "strings" "sync" "github.com/opencontainers/go-digest" @@ -230,3 +233,85 @@ func (ts *tagStore) ManifestDigests(ctx context.Context, tag string) ([]digest.D } return dgsts, nil } + +// List returns the tags for the repository. +func (ts *tagStore) List(ctx context.Context, limit int, last string) ([]string, error) { + filledBuffer := false + foundTags := 0 + var tags []string + + if limit == 0 { + return tags, errors.New("attempted to list 0 tags") + } + + root, err := pathFor(manifestTagsPathSpec{ + name: ts.repository.Named().Name(), + }) + if err != nil { + return tags, err + } + + startAfter := "" + if last != "" { + startAfter, err = pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + tag: last, + }) + if err != nil { + return tags, err + } + } + + err = ts.blobStore.driver.Walk(ctx, root, func(fileInfo storagedriver.FileInfo) error { + return handleTag(fileInfo, root, last, func(tagPath string) error { + tags = append(tags, tagPath) + foundTags += 1 + // if we've filled our slice, no need to walk any further + if limit > 0 && foundTags == limit { + filledBuffer = true + return storagedriver.ErrFilledBuffer + } + return nil + }) + }, storagedriver.WithStartAfterHint(startAfter)) + + if err != nil { + switch err := err.(type) { + case 
storagedriver.PathNotFoundError: + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} + default: + return tags, err + } + } + + if filledBuffer { + // There are potentially more tags to list + return tags, nil + } + + // We didn't fill the buffer, so that's the end of the list of tags + return tags, io.EOF +} + +// handleTag calls function fn with a tag path if fileInfo +// has a path of a tag under root and that it is lexographically +// after last. Otherwise, it will return ErrSkipDir or ErrFilledBuffer. +// These should be used with Walk to do handling with repositories in a +// storage. +func handleTag(fileInfo storagedriver.FileInfo, root, last string, fn func(tagPath string) error) error { + filePath := fileInfo.Path() + + // lop the base path off + tag := filePath[len(root)+1:] + parts := strings.SplitN(tag, "/", 2) + if len(parts) > 1 { + return storagedriver.ErrSkipDir + } + + if lessPath(last, tag) { + if err := fn(tag); err != nil { + return err + } + } + return storagedriver.ErrSkipDir +} diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/vacuum.go b/vendor/github.com/distribution/distribution/v3/registry/storage/vacuum.go index e074b45c9..ff981bb47 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/vacuum.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/vacuum.go @@ -91,11 +91,30 @@ func (v Vacuum) RemoveRepository(repoName string) error { if err != nil { return err } - repoDir := path.Join(rootForRepository, repoName) - dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) - err = v.driver.Delete(v.ctx, repoDir) + repoManifestDir := path.Join(rootForRepository, repoName, "_manifests") + dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoManifestDir) + err = v.driver.Delete(v.ctx, repoManifestDir) if err != nil { - return err + if _, ok := err.(driver.PathNotFoundError); !ok { + return err + } + } + repoLayerDir := 
path.Join(rootForRepository, repoName, "_layers") + dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoLayerDir) + err = v.driver.Delete(v.ctx, repoLayerDir) + if err != nil { + if _, ok := err.(driver.PathNotFoundError); !ok { + return err + } + } + + repoUploadDir := path.Join(rootForRepository, repoName, "_uploads") + dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoUploadDir) + err = v.driver.Delete(v.ctx, repoUploadDir) + if err != nil { + if _, ok := err.(driver.PathNotFoundError); !ok { + return err + } } return nil diff --git a/vendor/github.com/distribution/distribution/v3/tags.go b/vendor/github.com/distribution/distribution/v3/tags.go index ed94a51a0..670f0ac2d 100644 --- a/vendor/github.com/distribution/distribution/v3/tags.go +++ b/vendor/github.com/distribution/distribution/v3/tags.go @@ -27,6 +27,9 @@ type TagService interface { // Lookup returns the set of tags referencing the given digest. Lookup(ctx context.Context, digest v1.Descriptor) ([]string, error) + + // List returns the set of tags after last managed by this tag service + List(ctx context.Context, limit int, last string) ([]string, error) } // TagManifestsProvider provides method to retrieve the digests of manifests that a tag historically diff --git a/vendor/github.com/distribution/distribution/v3/tracing/tracing.go b/vendor/github.com/distribution/distribution/v3/tracing/tracing.go index dca9d584f..be9561398 100644 --- a/vendor/github.com/distribution/distribution/v3/tracing/tracing.go +++ b/vendor/github.com/distribution/distribution/v3/tracing/tracing.go @@ -29,11 +29,17 @@ const ( // InitOpenTelemetry initializes OpenTelemetry for the application. This function sets up the // necessary components for collecting telemetry data, such as traces. 
func InitOpenTelemetry(ctx context.Context) error { - res := resource.NewWithAttributes( - semconv.SchemaURL, - semconv.ServiceNameKey.String(serviceName), - semconv.ServiceVersionKey.String(version.Version()), + res, err := resource.New( + ctx, + resource.WithAttributes( + semconv.ServiceNameKey.String(serviceName), + semconv.ServiceVersionKey.String(version.Version()), + ), + resource.WithFromEnv(), // OTEL_SERVICE_NAME ) + if err != nil { + return err + } autoExp, err := autoexport.NewSpanExporter(ctx) if err != nil { diff --git a/vendor/github.com/distribution/distribution/v3/version/version.go b/vendor/github.com/distribution/distribution/v3/version/version.go index 63bbb2a0e..0d958c4cd 100644 --- a/vendor/github.com/distribution/distribution/v3/version/version.go +++ b/vendor/github.com/distribution/distribution/v3/version/version.go @@ -8,7 +8,7 @@ var mainpkg = "github.com/distribution/distribution/v3" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var version = "v3.0.0+unknown" +var version = "v3.1.0+unknown" // revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml index 38cb9ae10..08081fbde 100644 --- a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -1,104 +1,116 @@ -# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
- -linters-settings: - depguard: - rules: - prevent_unmaintained_packages: - list-mode: strict - files: - - $all - - "!$test" - allow: - - $gostd - - github.com/x448/float16 - deny: - - pkg: io/ioutil - desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" - dupl: - threshold: 100 - funlen: - lines: 100 - statements: 50 - goconst: - ignore-tests: true - min-len: 2 - min-occurrences: 3 - gocritic: - enabled-tags: - - diagnostic - - experimental - - opinionated - - performance - - style - disabled-checks: - - commentedOutCode - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - ifElseChain - - octalLiteral - - paramTypeCombine - - whyNoLint - gofmt: - simplify: false - goimports: - local-prefixes: github.com/fxamacker/cbor - golint: - min-confidence: 0 - govet: - check-shadowing: true - lll: - line-length: 140 - maligned: - suggest-new: true - misspell: - locale: US - staticcheck: - checks: ["all"] - +version: "2" linters: - disable-all: true + default: none enable: - asciicheck - bidichk - depguard - errcheck - - exportloopref + - forbidigo - goconst - gocritic - gocyclo - - gofmt - - goimports - goprintffuncname - gosec - - gosimple - govet - ineffassign - misspell - nilerr - revive - staticcheck - - stylecheck - - typecheck - unconvert - unused - + settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - '!$test' + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: 'replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil' + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + disabled-checks: + - commentedOutCode + - dupImport + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + govet: + enable: + - shadow + lll: + 
line-length: 140 + misspell: + locale: US + staticcheck: + checks: + - all + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: decode.go + text: string ` overflows ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` \(range is \[` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `, ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` overflows Go's int64` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `\]\)` has (\d+) occurrences, make it a constant + - path: valid.go + text: string ` for type ` has (\d+) occurrences, make it a constant + - path: valid.go + text: 'string `cbor: ` has (\d+) occurrences, make it a constant' + - linters: + - goconst + path: (.+)_test\.go + paths: + - third_party$ + - builtin$ + - examples$ issues: - # max-issues-per-linter default is 50. Set to 0 to disable limit. max-issues-per-linter: 0 - # max-same-issues default is 3. Set to 0 to disable limit. 
max-same-issues: 0 - - exclude-rules: - - path: decode.go - text: "string ` overflows ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `, ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string ` for type ` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string `cbor: ` has (\\d+) occurrences, make it a constant" +formatters: + enable: + - gofmt + - goimports + settings: + gofmt: + simplify: false + goimports: + local-prefixes: + - github.com/fxamacker/cbor + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md index d072b81c7..f9ae78ec9 100644 --- a/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -702,21 +702,20 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. -- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. -- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. -- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. +v2.9.1 (Mar 29-30, 2026) includes important bugfixes, defensive checks, improved code quality, and more tests. Although not public, the fuzzer was also improved by adding more fuzz tests. 
-v2.9.0 passed fuzz tests and is production quality. +v2.9.1 passed fuzz tests and is production quality. The minimum version of Go required to build: - v2.8.0 and newer releases require go 1.20+. - v2.7.1 and older releases require go 1.17+. -For more details, see [release notes](https://github.com/fxamacker/cbor/releases). +For more details, see [v2.9.1 release notes](https://github.com/fxamacker/cbor/releases). ### Prior Releases +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. It passed fuzz tests (billions of executions) and is production quality. + [v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. [v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. 
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go index 5051f110f..5743f3eb2 100644 --- a/vendor/github.com/fxamacker/cbor/v2/cache.go +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -92,94 +92,126 @@ func newTypeInfo(t reflect.Type) *typeInfo { } type decodingStructType struct { - fields fields - fieldIndicesByName map[string]int - err error - toArray bool + fields decodingFields + fieldIndicesByName map[string]int // Only populated if toArray is false + fieldIndicesByIntKey map[int64]int // Only populated if toArray is false + err error + toArray bool } -// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, -// here's a very basic implementation of an aggregated error. -type multierror []error - -func (m multierror) Error() string { - var sb strings.Builder - for i, err := range m { - sb.WriteString(err.Error()) - if i < len(m)-1 { - sb.WriteString(", ") - } - } - return sb.String() -} - -func getDecodingStructType(t reflect.Type) *decodingStructType { +func getDecodingStructType(t reflect.Type) (*decodingStructType, error) { if v, _ := decodingStructTypeCache.Load(t); v != nil { - return v.(*decodingStructType) + structType := v.(*decodingStructType) + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) toArray := hasToArrayOption(structOptions) - var errs []error - for i := 0; i < len(flds); i++ { - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) - break + if toArray { + return getDecodingStructToArrayType(t, flds) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + var fieldIndicesByIntKey map[int64]int + + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() 
except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. + if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } - flds[i].nameAsInt = int64(nameAsInt) } - flds[i].typInfo = getTypeInfo(flds[i].typ) - } + if f.keyAsInt { + if fieldIndicesByIntKey == nil { + fieldIndicesByIntKey = make(map[int64]int, len(flds)) + } + // The duplication check is only a safeguard, since getFields() already deduplicates fields. + if _, ok := fieldIndicesByIntKey[f.nameAsInt]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same keyasint value %d", t, f.nameAsInt), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByIntKey[f.nameAsInt] = i + } else { + // The duplication check is only a safeguard, since getFields() already deduplicates fields. 
+ if _, ok := fieldIndicesByName[f.name]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, f.name), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByName[f.name] = i + } - fieldIndicesByName := make(map[string]int, len(flds)) - for i, fld := range flds { - if _, ok := fieldIndicesByName[fld.name]; ok { - errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) - continue + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } - fieldIndicesByName[fld.name] = i } - var err error - { - var multi multierror - for _, each := range errs { - if each != nil { - multi = append(multi, each) + structType := &decodingStructType{ + fields: decFlds, + fieldIndicesByName: fieldIndicesByName, + fieldIndicesByIntKey: fieldIndicesByIntKey, + } + decodingStructTypeCache.Store(t, structType) + return structType, nil +} + +func getDecodingStructToArrayType(t reflect.Type, flds fields) (*decodingStructType, error) { + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } } - if len(multi) == 1 { - err = multi[0] - } else if len(multi) > 1 { - err = multi + + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } } structType := &decodingStructType{ - fields: flds, - fieldIndicesByName: fieldIndicesByName, - err: err, - toArray: toArray, + fields: decFlds, + toArray: true, } decodingStructTypeCache.Store(t, structType) - return structType + return structType, nil } type encodingStructType struct { - fields fields - bytewiseFields fields - lengthFirstFields fields - omitEmptyFieldsIdx []int + fields encodingFields + bytewiseFields encodingFields // Only populated if toArray is false + lengthFirstFields encodingFields // Only populated if toArray is false + omitEmptyFieldsIdx []int // Only populated if toArray is false err error toArray bool } -func (st *encodingStructType) getFields(em *encMode) fields { +func (st *encodingStructType) getFields(em *encMode) encodingFields { switch em.sort { case SortNone, SortFastShuffle: return st.fields @@ -191,7 +223,7 @@ func (st *encodingStructType) getFields(em *encMode) fields { } type bytewiseFieldSorter struct { - fields fields + fields encodingFields } func (x *bytewiseFieldSorter) Len() int { @@ -203,11 +235,11 @@ func (x *bytewiseFieldSorter) Swap(i, j int) { } func (x *bytewiseFieldSorter) Less(i, j int) bool { - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } type lengthFirstFieldSorter struct { - fields fields + fields encodingFields } func (x *lengthFirstFieldSorter) Len() int { @@ -222,13 +254,16 @@ func (x *lengthFirstFieldSorter) Less(i, j int) bool { if 
len(x.fields[i].cborName) != len(x.fields[j].cborName) { return len(x.fields[i].cborName) < len(x.fields[j].cborName) } - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { if v, _ := encodingStructTypeCache.Load(t); v != nil { structType := v.(*encodingStructType) - return structType, structType.err + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) @@ -237,111 +272,119 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { return getEncodingStructToArrayType(t, flds) } - var err error var hasKeyAsInt bool var hasKeyAsStr bool var omitEmptyIdx []int + + encFlds := make(encodingFields, len(flds)) + e := getEncodeBuffer() - for i := 0; i < len(flds); i++ { + defer putEncodeBuffer(e) + + for i, f := range flds { + encFlds[i] = &encodingField{field: *f} + ef := encFlds[i] + // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { - err = &UnsupportedTypeError{t} - break + ef.ef, ef.ief, ef.izf = getEncodeFunc(f.typ) + if ef.ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return nil, structType.err } // Encode field name - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") - break + if f.keyAsInt { + if f.nameAsInt == 0 { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &encodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + encodingStructTypeCache.Store(t, structType) + return nil, structType.err + } } - flds[i].nameAsInt = int64(nameAsInt) + nameAsInt := f.nameAsInt if nameAsInt >= 0 { - encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) //nolint:gosec } else { n := nameAsInt*(-1) - 1 - encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) //nolint:gosec } - flds[i].cborName = make([]byte, e.Len()) - copy(flds[i].cborName, e.Bytes()) + ef.cborName = make([]byte, e.Len()) + copy(ef.cborName, e.Bytes()) e.Reset() hasKeyAsInt = true } else { - encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) - flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) - n := copy(flds[i].cborName, e.Bytes()) - copy(flds[i].cborName[n:], flds[i].name) + encodeHead(e, byte(cborTypeTextString), uint64(len(f.name))) + ef.cborName = make([]byte, e.Len()+len(f.name)) + n := copy(ef.cborName, e.Bytes()) + copy(ef.cborName[n:], f.name) e.Reset() // If cborName contains a text string, then cborNameByteString contains a // string that has the byte string major type but is otherwise identical to // cborName. 
- flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) - copy(flds[i].cborNameByteString, flds[i].cborName) + ef.cborNameByteString = make([]byte, len(ef.cborName)) + copy(ef.cborNameByteString, ef.cborName) // Reset encoded CBOR type to byte string, preserving the "additional // information" bits: - flds[i].cborNameByteString[0] = byte(cborTypeByteString) | - getAdditionalInformation(flds[i].cborNameByteString[0]) + ef.cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(ef.cborNameByteString[0]) hasKeyAsStr = true } // Check if field can be omitted when empty - if flds[i].omitEmpty { + if f.omitEmpty { omitEmptyIdx = append(omitEmptyIdx, i) } } - putEncodeBuffer(e) - - if err != nil { - structType := &encodingStructType{err: err} - encodingStructTypeCache.Store(t, structType) - return structType, structType.err - } // Sort fields by canonical order - bytewiseFields := make(fields, len(flds)) - copy(bytewiseFields, flds) + bytewiseFields := make(encodingFields, len(encFlds)) + copy(bytewiseFields, encFlds) sort.Sort(&bytewiseFieldSorter{bytewiseFields}) lengthFirstFields := bytewiseFields if hasKeyAsInt && hasKeyAsStr { - lengthFirstFields = make(fields, len(flds)) - copy(lengthFirstFields, flds) + lengthFirstFields = make(encodingFields, len(encFlds)) + copy(lengthFirstFields, encFlds) sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) } structType := &encodingStructType{ - fields: flds, + fields: encFlds, bytewiseFields: bytewiseFields, lengthFirstFields: lengthFirstFields, omitEmptyFieldsIdx: omitEmptyIdx, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { - for i := 0; i < len(flds); i++ { - // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { + encFlds := make(encodingFields, len(flds)) + for i, f := 
range flds { + encFlds[i] = &encodingField{field: *f} + encFlds[i].ef, encFlds[i].ief, encFlds[i].izf = getEncodeFunc(f.typ) + if encFlds[i].ef == nil { structType := &encodingStructType{err: &UnsupportedTypeError{t}} encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return nil, structType.err } } structType := &encodingStructType{ - fields: flds, + fields: encFlds, toArray: true, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) { diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go index f0bdc3b38..03fd7f8b0 100644 --- a/vendor/github.com/fxamacker/cbor/v2/decode.go +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -16,7 +16,6 @@ import ( "math/big" "reflect" "strconv" - "strings" "time" "unicode/utf8" @@ -326,14 +325,14 @@ func (dmkm DupMapKeyMode) valid() bool { return dmkm >= 0 && dmkm < maxDupMapKeyMode } -// IndefLengthMode specifies whether to allow indefinite length items. +// IndefLengthMode specifies whether to allow indefinite-length items. type IndefLengthMode int const ( - // IndefLengthAllowed allows indefinite length items. + // IndefLengthAllowed allows indefinite-length items. IndefLengthAllowed IndefLengthMode = iota - // IndefLengthForbidden disallows indefinite length items. + // IndefLengthForbidden disallows indefinite-length items. IndefLengthForbidden maxIndefLengthMode @@ -378,6 +377,7 @@ const ( // - int64 if value fits // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 // - return UnmarshalTypeError if value > math.MaxInt64 + // // Deprecated: IntDecConvertSigned should not be used. // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. 
IntDecConvertSigned @@ -811,7 +811,7 @@ type DecOptions struct { // Default is 128*1024=131072 and it can be set to [16, 2147483647] MaxMapPairs int - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // TagsMd specifies whether to allow CBOR tags (major type 6). @@ -1055,7 +1055,7 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore } if !opts.ExtraReturnErrors.valid() { - return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) //nolint:gosec } if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { @@ -1149,8 +1149,8 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore unrecognizedTagToAny: opts.UnrecognizedTagToAny, timeTagToAny: opts.TimeTagToAny, simpleValues: simpleValues, - nanDec: opts.NaN, - infDec: opts.Inf, + nan: opts.NaN, + inf: opts.Inf, byteStringToTime: opts.ByteStringToTime, byteStringExpectedFormat: opts.ByteStringExpectedFormat, bignumTag: opts.BignumTag, @@ -1230,8 +1230,8 @@ type decMode struct { unrecognizedTagToAny UnrecognizedTagToAnyMode timeTagToAny TimeTagToAnyMode simpleValues *SimpleValueRegistry - nanDec NaNMode - infDec InfMode + nan NaNMode + inf InfMode byteStringToTime ByteStringToTimeMode byteStringExpectedFormat ByteStringExpectedFormatMode bignumTag BignumTagMode @@ -1272,8 +1272,8 @@ func (dm *decMode) DecOptions() DecOptions { UnrecognizedTagToAny: dm.unrecognizedTagToAny, TimeTagToAny: dm.timeTagToAny, SimpleValues: simpleValues, - NaN: dm.nanDec, - Inf: dm.infDec, + NaN: dm.nan, + Inf: dm.inf, ByteStringToTime: dm.byteStringToTime, ByteStringExpectedFormat: dm.byteStringExpectedFormat, BignumTag: dm.bignumTag, @@ -1583,11 +1583,11 @@ func (d *decoder) parseToValue(v 
reflect.Value, tInfo *typeInfo) error { //nolin _, ai, val := d.getHead() switch ai { case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat64: @@ -1595,10 +1595,10 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return fillFloat(t, f, v) default: // ai <= 24 - if d.dm.simpleValues.rejected[SimpleValue(val)] { + if d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } @@ -1677,20 +1677,23 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return d.parseToValue(v, tInfo) case cborTypeArray: - if tInfo.nonPtrKind == reflect.Slice { + switch tInfo.nonPtrKind { + case reflect.Slice: return d.parseArrayToSlice(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Array { + case reflect.Array: return d.parseArrayToArray(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Struct { + case reflect.Struct: return d.parseArrayToStruct(v, tInfo) } + d.skip() return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} case cborTypeMap: - if tInfo.nonPtrKind == reflect.Struct { + switch tInfo.nonPtrKind { + case reflect.Struct: return d.parseMapToStruct(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Map { + case reflect.Map: return d.parseMapToMap(v, tInfo) } d.skip() @@ -1745,8 +1748,8 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { // Read tag number _, _, tagNum := d.getHead() if 
tagNum != 0 && tagNum != 1 { - d.skip() // skip tag content - return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") //nolint:gosec } } } else { @@ -1815,10 +1818,10 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { var f float64 switch ai { case additionalInformationAsFloat16: - f = float64(float16.Frombits(uint16(val)).Float32()) + f = float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec case additionalInformationAsFloat32: - f = float64(math.Float32frombits(uint32(val))) + f = float64(math.Float32frombits(uint32(val))) //nolint:gosec case additionalInformationAsFloat64: f = math.Float64frombits(val) @@ -1832,6 +1835,13 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { return time.Time{}, true, nil } seconds, fractional := math.Modf(f) + if seconds > math.MaxInt64 || seconds < math.MinInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%v overflows Go's int64", f), + } + } return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil default: @@ -2145,14 +2155,14 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc case cborTypePrimitives: _, ai, val := d.getHead() - if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return nil, &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } if ai < 20 || ai == 24 { - return SimpleValue(val), nil + return SimpleValue(val), nil //nolint:gosec } switch ai { @@ 
-2165,11 +2175,11 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc return nil, nil case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return f, nil case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return f, nil case additionalInformationAsFloat64: @@ -2202,16 +2212,16 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc func (d *decoder) parseByteString() ([]byte, bool) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec return b, false } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - b = append(b, d.data[d.off:d.off+int(val)]...) - d.off += int(val) + b = append(b, d.data[d.off:d.off+int(val)]...) //nolint:gosec + d.off += int(val) //nolint:gosec } return b, true } @@ -2300,19 +2310,19 @@ func (d *decoder) applyByteStringTextConversion( func (d *decoder) parseTextString() ([]byte, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { return nil, &SemanticError{"cbor: invalid UTF-8 string"} } return b, nil } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. 
b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - x := d.data[d.off : d.off+int(val)] - d.off += int(val) + x := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { for !d.foundBreak() { d.skip() // Skip remaining chunk on error @@ -2327,7 +2337,7 @@ func (d *decoder) parseTextString() ([]byte, error) { func (d *decoder) parseArray() ([]any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2349,7 +2359,7 @@ func (d *decoder) parseArray() ([]any, error) { func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2371,7 +2381,7 @@ func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec gi := 0 vLen := v.Len() var err error @@ -2400,7 +2410,7 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseMap() (any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec m := make(map[any]any) var k, e any var err, lastErr error @@ -2465,7 +2475,7 @@ func (d *decoder) parseMap() (any, error) { func (d *decoder) parseMapToMap(v reflect.Value, tInfo 
*typeInfo) error { //nolint:gocyclo _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if v.IsNil() { mapsize := count if !hasSize { @@ -2566,9 +2576,9 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli } func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if !structType.toArray { @@ -2584,7 +2594,7 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { start := d.off _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size } @@ -2637,11 +2647,72 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { return err } -// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +// skipMapEntriesFromIndex skips remaining map entries starting from index i. +func (d *decoder) skipMapEntriesFromIndex(i, count int, hasSize bool) { + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() + d.skip() + } +} + +// skipMapForDupKey skips the current map value and all remaining map entries, +// then returns a DupMapKeyError for the given key at map index i. +func (d *decoder) skipMapForDupKey(dupKey any, i, count int, hasSize bool) error { + // Skip the value of the duplicate key. + d.skip() + // Skip all remaining map entries. 
+ d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &DupMapKeyError{dupKey, i} +} + +// skipMapForUnknownField skips the current map value and all remaining map entries, +// then returns a UnknownFieldError for the given key at map index i. +func (d *decoder) skipMapForUnknownField(i, count int, hasSize bool) error { + // Skip the value of the unknown key. + d.skip() + // Skip all remaining map entries. + d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &UnknownFieldError{i} +} + +// decodeToStructField decodes the next CBOR value into the struct field f in v. +// If the field cannot be resolved, the CBOR value is skipped. +func (d *decoder) decodeToStructField(v reflect.Value, f *decodingField, tInfo *typeInfo) error { + var fv reflect.Value + + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + var err error + fv, err = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if !fv.IsValid() { + d.skip() + return err + } + } + + err := d.parseToValue(fv, f.typInfo) + if err != nil { + if typeError, ok := err.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name + } + return err + } + + return nil +} + func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if structType.toArray { @@ -2654,14 +2725,12 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - var err, lastErr error - // Get CBOR map size _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec - // Keeps track of matched struct fields + // Keep track of matched struct fields to detect duplicate map keys. var foundFldIdx []bool { const maxStackFields = 128 @@ -2675,99 +2744,80 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - // Keeps track of CBOR map keys to detect duplicate map key - keyCount := 0 - var mapKeys map[any]struct{} - - errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + // Keep track of unmatched CBOR map keys to detect duplicate map keys. + var unmatchedMapKeys map[any]struct{} -MapEntryLoop: - for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - var f *field + var err error - // If duplicate field detection is enabled and the key at index j did not match any - // field, k will hold the map key. - var k any + caseInsensitive := d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { t := d.nextCBORType() - if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + + // Reclassify disallowed byte string keys so they fall to the default case. + // keyType is only used for branch control. 
+ keyType := t + if t == cborTypeByteString && d.dm.fieldNameByteString != FieldNameByteStringAllowed { + keyType = 0xff + } + + switch keyType { + case cborTypeTextString, cborTypeByteString: var keyBytes []byte if t == cborTypeTextString { - keyBytes, lastErr = d.parseTextString() - if lastErr != nil { + var parseErr error + keyBytes, parseErr = d.parseTextString() + if parseErr != nil { if err == nil { - err = lastErr + err = parseErr } - d.skip() // skip value + d.skip() // Skip value continue } } else { // cborTypeByteString keyBytes, _ = d.parseByteString() } - // Check for exact match on field name. - if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { - fld := structType.fields[i] + // Find matching struct field (exact match, then case-insensitive fallback). + if fldIdx, ok := findStructFieldByKey(structType, keyBytes, caseInsensitive); ok { + fld := structType.fields[fldIdx] - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{fld.name, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - return err - } else { - // discard repeated match + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(string(keyBytes), i, count, hasSize) + case mapActionSkipValueAndContinue: d.skip() - continue MapEntryLoop + continue } } - // Find field with case-insensitive match - if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { - keyLen := len(keyBytes) - keyString := string(keyBytes) - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { - 
if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{keyString, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop - } - break - } - } + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, string(keyBytes), i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = string(keyBytes) - } - } else if t <= cborTypeNegativeInt { // uint/int + case cborTypePositiveInt, cborTypeNegativeInt: var nameAsInt int64 if t == cborTypePositiveInt { _, _, val := d.getHead() - nameAsInt = int64(val) + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(val) //nolint:gosec } else { _, _, val := d.getHead() if val > math.MaxInt64 { @@ -2781,39 +2831,35 @@ MapEntryLoop: d.skip() // skip value continue } - nameAsInt = int64(-1) ^ int64(val) - } - - // Find field - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if fld.keyAsInt && fld.nameAsInt == nameAsInt { - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{nameAsInt, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop + nameAsInt = int64(-1) ^ int64(val) //nolint:gosec + } + + // Find field by integer key + if fldIdx, ok := 
structType.fieldIndicesByIntKey[nameAsInt]; ok { + fld := structType.fields[fldIdx] + + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - break + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(nameAsInt, i, count, hasSize) + case mapActionSkipValueAndContinue: + d.skip() + continue } } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = nameAsInt + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, nameAsInt, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - } else { + + default: + // CBOR map keys that can't be matched to any struct field. + if err == nil { err = &UnmarshalTypeError{ CBORType: t.String(), @@ -2821,97 +2867,31 @@ MapEntryLoop: errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", } } + + var otherKey any if d.dm.dupMapKey == DupMapKeyEnforcedAPF { // parse key - k, lastErr = d.parse(true) - if lastErr != nil { + var parseErr error + otherKey, parseErr = d.parse(true) + if parseErr != nil { d.skip() // skip value continue } // Detect if CBOR map key can be used as Go map key. - if !isHashableValue(reflect.ValueOf(k)) { + if !isHashableValue(reflect.ValueOf(otherKey)) { d.skip() // skip value continue } } else { d.skip() // skip key } - } - - if f == nil { - if errOnUnknownField { - err = &UnknownFieldError{j} - d.skip() // Skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - - // Two map keys that match the same struct field are immediately considered - // duplicates. This check detects duplicates between two map keys that do - // not match a struct field. If unknown field errors are enabled, then this - // check is never reached. 
- if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - if mapKeys == nil { - mapKeys = make(map[any]struct{}, 1) - } - mapKeys[k] = struct{}{} - newKeyCount := len(mapKeys) - if newKeyCount == keyCount { - err = &DupMapKeyError{k, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - keyCount = newKeyCount - } - - d.skip() // Skip value - continue - } - - // Get field value by index - var fv reflect.Value - if len(f.idx) == 1 { - fv = v.Field(f.idx[0]) - } else { - fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { - // Return a new value for embedded field null pointer to point to, or return error. - if !v.CanSet() { - return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) - } - v.Set(reflect.New(v.Type().Elem())) - return v, nil - }) - if lastErr != nil && err == nil { - err = lastErr - } - if !fv.IsValid() { - d.skip() - continue - } - } - if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { - if err == nil { - if typeError, ok := lastErr.(*UnmarshalTypeError); ok { - typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name - err = typeError - } else { - err = lastErr - } + if unmatchedErr := handleUnmatchedMapKey(d, otherKey, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } } } + return err } @@ -2958,15 +2938,15 @@ func (d *decoder) skip() { switch t { case cborTypeByteString, cborTypeTextString: - d.off += int(val) + d.off += int(val) //nolint:gosec case cborTypeArray: - for i := 0; i < int(val); i++ { + for i := 0; i < int(val); i++ { //nolint:gosec d.skip() } case cborTypeMap: - for i := 0; i < int(val)*2; i++ { + for i := 0; i < int(val)*2; i++ { //nolint:gosec d.skip() } diff --git a/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go b/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go new file mode 100644 index 000000000..3c8c423ad --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go @@ -0,0 +1,98 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import "strings" + +// mapAction represents the next action during decoding a CBOR map to a Go struct. +type mapAction int + +const ( + mapActionParseValueAndContinue mapAction = iota // The caller should process the map value. + mapActionSkipValueAndContinue // The caller should skip the map value. + mapActionSkipAllAndReturnError // The caller should skip the rest of the map and return an error. +) + +// checkDupField checks if a struct field at index i has already been matched and returns the next action. +// If not matched, it marks the field as matched and returns mapActionParseValueAndContinue. +// If matched and DupMapKeyEnforcedAPF is specified in the given dm, it returns mapActionSkipAllAndReturnError. +// If matched and DupMapKeyEnforcedAPF is not specified in the given dm, it returns mapActionSkipValueAndContinue. 
+func checkDupField(dm *decMode, foundFldIdx []bool, i int) mapAction { + if !foundFldIdx[i] { + foundFldIdx[i] = true + return mapActionParseValueAndContinue + } + if dm.dupMapKey == DupMapKeyEnforcedAPF { + return mapActionSkipAllAndReturnError + } + return mapActionSkipValueAndContinue +} + +// findStructFieldByKey finds a struct field matching keyBytes by name. +// It tries an exact match first. If no exact match is found and +// caseInsensitive is true, it falls back to a case-insensitive search. +// findStructFieldByKey returns the field index and true, or -1 and false. +func findStructFieldByKey( + structType *decodingStructType, + keyBytes []byte, + caseInsensitive bool, +) (int, bool) { + if fldIdx, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + return fldIdx, true + } + if caseInsensitive { + return findFieldCaseInsensitive(structType.fields, string(keyBytes)) + } + return -1, false +} + +// findFieldCaseInsensitive returns the index of the first field whose name +// case-insensitively matches key, or -1 and false if no field matches. +func findFieldCaseInsensitive(flds decodingFields, key string) (int, bool) { + keyLen := len(key) + for i, f := range flds { + if f.keyAsInt { + continue + } + if len(f.name) == keyLen && strings.EqualFold(f.name, key) { + return i, true + } + } + return -1, false +} + +// handleUnmatchedMapKey handles a map entry whose key does not match any struct +// field. It can return UnknownFieldError or DupMapKeyError. +// handleUnmatchedMapKey consumes the CBOR value, so the caller doesn't need to skip any values. +// If an error is returned, the caller should abort parsing the map and return the error. +// If no error is returned, the caller should continue to process the next map pair. 
+func handleUnmatchedMapKey( + d *decoder, + key any, + i int, + count int, + hasSize bool, + // *map[any]struct{} is used here because we use lazy initialization for uks + uks *map[any]struct{}, //nolint:gocritic +) error { + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + + if errOnUnknownField { + return d.skipMapForUnknownField(i, count, hasSize) + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if *uks == nil { + *uks = make(map[any]struct{}) + } + if _, dup := (*uks)[key]; dup { + return d.skipMapForDupKey(key, i, count, hasSize) + } + (*uks)[key] = struct{}{} + } + + // Skip value. + d.skip() + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go index 44afb8660..42a67ad11 100644 --- a/vendor/github.com/fxamacker/cbor/v2/diagnose.go +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -51,11 +51,8 @@ const ( maxByteStringEncoding ) -func (bse ByteStringEncoding) valid() error { - if bse >= maxByteStringEncoding { - return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) - } - return nil +func (bse ByteStringEncoding) valid() bool { + return bse < maxByteStringEncoding } // DiagOptions specifies Diag options. 
@@ -104,8 +101,8 @@ func (opts DiagOptions) DiagMode() (DiagMode, error) { } func (opts DiagOptions) diagMode() (*diagMode, error) { - if err := opts.ByteStringEncoding.valid(); err != nil { - return nil, err + if !opts.ByteStringEncoding.valid() { + return nil, errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(opts.ByteStringEncoding))) } decMode, err := DecOptions{ @@ -360,7 +357,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeArray: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('[') for i := 0; i < count; i++ { @@ -376,7 +373,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeMap: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('{') for i := 0; i < count; i++ { @@ -477,8 +474,8 @@ func (di *diagnose) item() error { //nolint:gocyclo func (di *diagnose) writeU16(val rune) { di.w.WriteString("\\u") var in [2]byte - in[0] = byte(val >> 8) - in[1] = byte(val) + in[0] = byte(val >> 8) //nolint:gosec + in[1] = byte(val) //nolint:gosec sz := hex.EncodedLen(len(in)) di.w.Grow(sz) dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] @@ -608,7 +605,7 @@ func (di *diagnose) encodeTextString(val string, quote byte) error { c, size := utf8.DecodeRuneInString(val[i:]) switch { - case c == utf8.RuneError: + case c == utf8.RuneError && size == 1: return &SemanticError{"cbor: invalid UTF-8 string"} case c < utf16SurrSelf: @@ -631,7 +628,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { f64 := float64(0) switch ai { case additionalInformationAsFloat16: - f16 := float16.Frombits(uint16(val)) + f16 := float16.Frombits(uint16(val)) //nolint:gosec switch { case f16.IsNaN(): di.w.WriteString("NaN") @@ -647,7 +644,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { } case additionalInformationAsFloat32: - f32 := math.Float32frombits(uint32(val)) + f32 := math.Float32frombits(uint32(val)) //nolint:gosec 
switch { case f32 != f32: di.w.WriteString("NaN") diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go index c550617c3..e65a29d8a 100644 --- a/vendor/github.com/fxamacker/cbor/v2/encode.go +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -30,7 +30,7 @@ import ( // If value implements the Marshaler interface, Marshal calls its // MarshalCBOR method. // -// If value implements encoding.BinaryMarshaler, Marhsal calls its +// If value implements encoding.BinaryMarshaler, Marshal calls its // MarshalBinary method and encode it as CBOR byte string. // // Boolean values encode as CBOR booleans (type 7). @@ -343,7 +343,7 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339 // TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content @@ -351,9 +351,13 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339Nano + // TimeRFC3339NanoUTC causes time.Time to encode to a CBOR time (tag 0) with a text string content + // representing UTC time using nanosecond precision in RFC3339 format. + TimeRFC3339NanoUTC + maxTimeMode ) @@ -436,7 +440,7 @@ const ( // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). 
FieldNameToTextString FieldNameMode = iota - // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + // FieldNameToByteString encodes struct fields to CBOR byte string (major type 2). FieldNameToByteString maxFieldNameMode @@ -567,7 +571,7 @@ type EncOptions struct { // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. TimeTag EncTagMode - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // NilContainers specifies how to encode nil slices and maps. @@ -1132,10 +1136,11 @@ func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { if fopt == ShortestFloat16 { var f16 float16.Float16 p := float16.PrecisionFromfloat32(f32) - if p == float16.PrecisionExact { + switch p { + case float16.PrecisionExact: // Roundtrip float32->float16->float32 test isn't needed. f16 = float16.Fromfloat32(f32) - } else if p == float16.PrecisionUnknown { + case float16.PrecisionUnknown: // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. 
f16 = float16.Fromfloat32(f32) if f16.Float32() == f32 { @@ -1293,10 +1298,10 @@ func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { if slen == 0 { return e.WriteByte(byte(cborTypeByteString)) } - encodeHead(e, byte(cborTypeByteString), uint64(slen)) + encodeHead(e, byte(cborTypeByteString), uint64(slen)) //nolint:gosec if vk == reflect.Array { for i := 0; i < slen; i++ { - e.WriteByte(byte(v.Index(i).Uint())) + e.WriteByte(byte(v.Index(i).Uint())) //nolint:gosec } return nil } @@ -1333,7 +1338,7 @@ func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) if alen == 0 { return e.WriteByte(byte(cborTypeArray)) } - encodeHead(e, byte(cborTypeArray), uint64(alen)) + encodeHead(e, byte(cborTypeArray), uint64(alen)) //nolint:gosec for i := 0; i < alen; i++ { if err := ae.f(e, em, v.Index(i)); err != nil { return err @@ -1364,7 +1369,7 @@ func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) er return e.WriteByte(byte(cborTypeMap)) } - encodeHead(e, byte(cborTypeMap), uint64(mlen)) + encodeHead(e, byte(cborTypeMap), uint64(mlen)) //nolint:gosec if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { return me.e(e, em, v, nil) } @@ -1427,7 +1432,7 @@ func (x *bytewiseKeyValueSorter) Swap(i, j int) { func (x *bytewiseKeyValueSorter) Less(i, j int) bool { kvi, kvj := x.kvs[i], x.kvs[j] - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } type lengthFirstKeyValueSorter struct { @@ -1448,7 +1453,7 @@ func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { return keyLengthDifference < 0 } - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return 
bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } var keyValuePool = sync.Pool{} @@ -1535,8 +1540,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Head is rewritten later if actual encoded field count is different from struct field count. encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) - kvbegin := e.Len() - kvcount := 0 + kvBeginOffset := e.Len() + kvCount := 0 for offset := 0; offset < len(flds); offset++ { f := flds[(start+offset)%len(flds)] @@ -1582,10 +1587,10 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { return err } - kvcount++ + kvCount++ } - if len(flds) == kvcount { + if len(flds) == kvCount { // Encoded element count in head is the same as actual element count. return nil } @@ -1593,8 +1598,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Overwrite the bytes that were reserved for the head before encoding the map entries. var actualHeadLen int { - headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) - actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + headbuf := *bytes.NewBuffer(e.Bytes()[kvBeginOffset-encodedHeadLen : kvBeginOffset-encodedHeadLen : kvBeginOffset]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvCount)) } if actualHeadLen == encodedHeadLen { @@ -1607,8 +1612,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // encoded. The encoded entries are offset to the right by the number of excess reserved // bytes. Shift the entries left to remove the gap. 
excessReservedBytes := encodedHeadLen - actualHeadLen - dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] - src := e.Bytes()[kvbegin:e.Len()] + dst := e.Bytes()[kvBeginOffset-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvBeginOffset:e.Len()] copy(dst, src) // After shifting, the excess bytes are at the end of the output buffer and they are @@ -1633,7 +1638,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { } if em.timeTag == EncTagRequired { tagNumber := 1 - if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano || em.time == TimeRFC3339NanoUTC { tagNumber = 0 } encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) @@ -1650,7 +1655,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { case TimeUnixDynamic: t = t.UTC().Round(time.Microsecond) - secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) //nolint:gosec if nsecs == 0 { return encodeInt(e, em, reflect.ValueOf(secs)) } @@ -1661,6 +1666,10 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { s := t.Format(time.RFC3339) return encodeString(e, em, reflect.ValueOf(s)) + case TimeRFC3339NanoUTC: + s := t.UTC().Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + default: // TimeRFC3339Nano s := t.Format(time.RFC3339Nano) return encodeString(e, em, reflect.ValueOf(s)) diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go index 30f72814f..9912e855c 100644 --- a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -93,6 +93,6 @@ func (sv *SimpleValue) unmarshalCBOR(data []byte) error { // It is safe to cast val to uint8 here because // - data is already verified to be well-formed CBOR simple value and // - val is <= math.MaxUint8. 
- *sv = SimpleValue(val) + *sv = SimpleValue(val) //nolint:gosec return nil } diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go index 7ac6d7d67..da4c8f210 100644 --- a/vendor/github.com/fxamacker/cbor/v2/stream.go +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -171,14 +171,20 @@ func NewEncoder(w io.Writer) *Encoder { // Encode writes the CBOR encoding of v. func (enc *Encoder) Encode(v any) error { - if len(enc.indefTypes) > 0 && v != nil { - indefType := enc.indefTypes[len(enc.indefTypes)-1] - if indefType == cborTypeTextString { + if len(enc.indefTypes) > 0 { + switch enc.indefTypes[len(enc.indefTypes)-1] { + case cborTypeTextString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length text string") + } k := reflect.TypeOf(v).Kind() if k != reflect.String { return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") } - } else if indefType == cborTypeByteString { + case cborTypeByteString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length byte string") + } t := reflect.TypeOf(v) k := t.Kind() if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { @@ -198,35 +204,35 @@ func (enc *Encoder) Encode(v any) error { return err } -// StartIndefiniteByteString starts byte string encoding of indefinite length. +// StartIndefiniteByteString starts indefinite-length byte string encoding. // Subsequent calls of (*Encoder).Encode() encodes definite length byte strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteByteString() error { return enc.startIndefinite(cborTypeByteString) } -// StartIndefiniteTextString starts text string encoding of indefinite length. +// StartIndefiniteTextString starts indefinite-length text string encoding. 
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteTextString() error { return enc.startIndefinite(cborTypeTextString) } -// StartIndefiniteArray starts array encoding of indefinite length. +// StartIndefiniteArray starts indefinite-length array encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the array // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteArray() error { return enc.startIndefinite(cborTypeArray) } -// StartIndefiniteMap starts array encoding of indefinite length. +// StartIndefiniteMap starts indefinite-length map encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the map // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteMap() error { return enc.startIndefinite(cborTypeMap) } -// EndIndefinite closes last opened indefinite length value. +// EndIndefinite closes last opened indefinite-length value. 
func (enc *Encoder) EndIndefinite() error { if len(enc.indefTypes) == 0 { return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") @@ -238,18 +244,22 @@ func (enc *Encoder) EndIndefinite() error { return err } -var cborIndefHeader = map[cborType][]byte{ - cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, - cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, - cborTypeArray: {cborArrayWithIndefiniteLengthHead}, - cborTypeMap: {cborMapWithIndefiniteLengthHead}, -} - func (enc *Encoder) startIndefinite(typ cborType) error { if enc.em.indefLength == IndefLengthForbidden { return &IndefiniteLengthError{typ} } - _, err := enc.w.Write(cborIndefHeader[typ]) + var head byte + switch typ { + case cborTypeByteString: + head = cborByteStringWithIndefiniteLengthHead + case cborTypeTextString: + head = cborTextStringWithIndefiniteLengthHead + case cborTypeArray: + head = cborArrayWithIndefiniteLengthHead + case cborTypeMap: + head = cborMapWithIndefiniteLengthHead + } + _, err := enc.w.Write([]byte{head}) if err == nil { enc.indefTypes = append(enc.indefTypes, typ) } @@ -262,7 +272,9 @@ type RawMessage []byte // MarshalCBOR returns m or CBOR nil if m is nil. func (m RawMessage) MarshalCBOR() ([]byte, error) { if len(m) == 0 { - return cborNil, nil + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil } return m, nil } diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go index cf0a922cd..b2d71f2e9 100644 --- a/vendor/github.com/fxamacker/cbor/v2/structfields.go +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -6,27 +6,43 @@ package cbor import ( "reflect" "sort" + "strconv" "strings" ) +// field holds shared struct field metadata returned by getFields(). 
type field struct { - name string - nameAsInt int64 // used to decoder to match field name with CBOR int + name string + nameAsInt int64 // used to match field name with CBOR int + idx []int + typ reflect.Type // used during cache building only + keyAsInt bool // used to encode/decode field name as int + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + omitZero bool // used to skip zero field +} + +type fields []*field + +// encodingField extends field with encoding-specific data. +type encodingField struct { + field cborName []byte - cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 - idx []int - typ reflect.Type + cborNameByteString []byte // major type 2 name encoding if cborName has major type 3 ef encodeFunc ief isEmptyFunc izf isZeroFunc - typInfo *typeInfo // used to decoder to reuse type info - tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) - omitEmpty bool // used to skip empty field - omitZero bool // used to skip zero field - keyAsInt bool // used to encode/decode field name as int } -type fields []*field +type encodingFields []*encodingField + +// decodingField extends field with decoding-specific data. +type decodingField struct { + field + typInfo *typeInfo // used by decoder to reuse type info +} + +type decodingFields []*decodingField // indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. type indexFieldSorter struct { @@ -48,7 +64,7 @@ func (x *indexFieldSorter) Less(i, j int) bool { return iIdx[k] < jIdx[k] } } - return len(iIdx) <= len(jIdx) + return len(iIdx) < len(jIdx) } // nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. 
@@ -69,6 +85,10 @@ func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { if fi.name != fj.name { return fi.name < fj.name } + // Fields with the same name but different keyAsInt are in separate namespaces. + if fi.keyAsInt != fj.keyAsInt { + return fi.keyAsInt + } if len(fi.idx) != len(fj.idx) { return len(fi.idx) < len(fj.idx) } @@ -117,22 +137,37 @@ func getFields(t reflect.Type) (flds fields, structOptions string) { } } + // Normalize keyasint field names to their canonical integer string form. + // This ensures that "01", "+1", and "1" are treated as the same key + // during deduplication. + for _, f := range flds { + if f.keyAsInt { + nameAsInt, err := strconv.Atoi(f.name) + if err != nil { + continue // Leave invalid names for callers to report. + } + f.nameAsInt = int64(nameAsInt) + f.name = strconv.Itoa(nameAsInt) + } + } + sort.Sort(&nameLevelAndTagFieldSorter{flds}) // Keep visible fields. j := 0 // index of next unique field for i := 0; i < len(flds); { name := flds[i].name + keyAsInt := flds[i].keyAsInt if i == len(flds)-1 || // last field - name != flds[i+1].name || // field i has unique field name + name != flds[i+1].name || flds[i+1].keyAsInt != keyAsInt || // field i has unique (name, keyAsInt) len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not flds[j] = flds[i] j++ } - // Skip fields with the same field name. - for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + // Skip fields with the same (name, keyAsInt). 
+ for i++; i < len(flds) && name == flds[i].name && keyAsInt == flds[i].keyAsInt; i++ { //nolint:revive } } if j != len(flds) { diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go index b40793b95..850b95019 100644 --- a/vendor/github.com/fxamacker/cbor/v2/valid.go +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -54,7 +54,7 @@ func (e *MaxMapPairsError) Error() string { return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" } -// IndefiniteLengthError indicates found disallowed indefinite length items. +// IndefiniteLengthError indicates found disallowed indefinite-length items. type IndefiniteLengthError struct { t cborType } @@ -113,7 +113,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err } return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") @@ -136,7 +136,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") @@ -212,7 +212,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return depth, nil } -// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +// wellformedIndefiniteString checks indefinite-length byte/text string's well-formedness and returns max depth and error. 
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error for { @@ -223,7 +223,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin d.off++ break } - // Peek ahead to get next type and indefinite length status. + // Peek ahead to get next type and indefinite-length status. nt, ai := parseInitialByte(d.data[d.off]) if t != nt { return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} @@ -238,7 +238,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin return depth, nil } -// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +// wellformedIndefiniteArrayOrMap checks indefinite-length array/map's well-formedness and returns max depth and error. func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error maxDepth := depth @@ -326,7 +326,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -341,7 +341,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -379,12 +379,12 @@ func (d *decoder) wellformedHead() (t cborType, 
ai byte, val uint64, err error) func (d *decoder) acceptableFloat(f float64) error { switch { - case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + case d.dm.nan == NaNDecodeForbidden && math.IsNaN(f): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point NaN", } - case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + case d.dm.inf == InfDecodeForbidden && math.IsInf(f, 0): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point infinity", diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore index 59cd29489..885dc27ab 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -2,3 +2,5 @@ *.cov .idea .env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index fdae591bc..dc7c96053 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md index 9322b065e..bac878f21 100644 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project 
maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md index 03c098316..2ebebedc1 100644 --- a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md @@ -4,21 +4,21 @@ | Total Contributors | Total Contributions | | --- | --- | -| 12 | 95 | +| 12 | 101 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 48 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi | -| @casualjim | 33 | https://github.com/go-openapi/jsonpointer/commits?author=casualjim | -| @magodo | 3 | https://github.com/go-openapi/jsonpointer/commits?author=magodo | -| @youyuanwu | 3 | https://github.com/go-openapi/jsonpointer/commits?author=youyuanwu | -| @gaiaz-iusipov | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gaiaz-iusipov | -| @gbjk | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gbjk | -| @gordallott | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gordallott | -| @ianlancetaylor | 1 | 
https://github.com/go-openapi/jsonpointer/commits?author=ianlancetaylor | -| @mfleader | 1 | https://github.com/go-openapi/jsonpointer/commits?author=mfleader | -| @Neo2308 | 1 | https://github.com/go-openapi/jsonpointer/commits?author=Neo2308 | -| @olivierlemasle | 1 | https://github.com/go-openapi/jsonpointer/commits?author=olivierlemasle | -| @testwill | 1 | https://github.com/go-openapi/jsonpointer/commits?author=testwill | +| @fredbi | 54 | | +| @casualjim | 33 | | +| @magodo | 3 | | +| @youyuanwu | 3 | | +| @gaiaz-iusipov | 1 | | +| @gbjk | 1 | | +| @gordallott | 1 | | +| @ianlancetaylor | 1 | | +| @mfleader | 1 | | +| @Neo2308 | 1 | | +| @olivierlemasle | 1 | | +| @testwill | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index b61b63fd9..c52803e2e 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -8,12 +8,22 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- An implementation of JSON Pointer for golang, which supports go `struct`. 
+## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + ## Status API is stable. @@ -124,21 +134,20 @@ Maintainers can cut a new release by either: [release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer.svg [release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer -[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg -[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer [gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonpointer [gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonpointer [codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/jsonpointer [codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/jsonpointer -[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F -[doc-url]: https://goswagger.io/go-openapi [godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer [godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonpointer [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg 
[license-url]: https://github.com/go-openapi/jsonpointer/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md index 2a7b6f091..1fea2c573 100644 --- a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md +++ b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | :white_check_mark: | +| O.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/jsonreference/.cliff.toml b/vendor/github.com/go-openapi/jsonreference/.cliff.toml deleted file mode 100644 index 702629f5d..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore index 769c24400..885dc27ab 100644 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -1 +1,6 @@ -secrets.yml +*.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index fdae591bc..dc7c96053 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md index 9322b065e..bac878f21 100644 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . 
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md index 9907d5d21..7faeb83a7 100644 --- a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md @@ -4,11 +4,11 @@ | Total Contributors | Total Contributions | | --- | --- | -| 9 | 68 | +| 9 | 73 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 31 | https://github.com/go-openapi/jsonreference/commits?author=fredbi | +| @fredbi | 36 | https://github.com/go-openapi/jsonreference/commits?author=fredbi | | @casualjim | 25 | https://github.com/go-openapi/jsonreference/commits?author=casualjim | | @youyuanwu | 5 | https://github.com/go-openapi/jsonreference/commits?author=youyuanwu | | @olivierlemasle | 2 | https://github.com/go-openapi/jsonreference/commits?author=olivierlemasle | diff --git a/vendor/github.com/go-openapi/jsonreference/NOTICE b/vendor/github.com/go-openapi/jsonreference/NOTICE index f3b51939a..814e87ef8 100644 --- a/vendor/github.com/go-openapi/jsonreference/NOTICE +++ b/vendor/github.com/go-openapi/jsonreference/NOTICE @@ -3,7 +3,7 @@ Copyright 2015-2025 go-swagger maintainers // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -This software library, 
github.com/go-openapi/jsonpointer, includes software developed +This software library, github.com/go-openapi/jsonreference, includes software developed by the go-swagger and go-openapi maintainers ("go-swagger maintainers"). Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ It ships with copies of other software which license terms are recalled below. The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com). -github.com/sigh-399/jsonpointer +github.com/sigh-399/jsonreference =========================== // SPDX-FileCopyrightText: Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index d479dbdc7..adea16061 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -8,12 +8,22 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- An implementation of JSON Reference for golang. +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). 
[![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + ## Status API is stable. @@ -26,18 +36,33 @@ go get github.com/go-openapi/jsonreference ## Dependencies -* https://github.com/go-openapi/jsonpointer +* ## Basic usage +```go +// Creating a new reference +ref, err := jsonreference.New("http://example.com/doc.json#/definitions/Pet") + +// Fragment-only reference +fragRef := jsonreference.MustCreateRef("#/definitions/Pet") + +// Resolving references +parent, _ := jsonreference.New("http://example.com/base.json") +child, _ := jsonreference.New("#/definitions/Pet") +resolved, _ := parent.Inherits(child) +// Result: "http://example.com/base.json#/definitions/Pet" +``` + + ## Change log See ## References -* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* +* ## Licensing @@ -89,6 +114,9 @@ Maintainers can cut a new release by either: [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg [license-url]: https://github.com/go-openapi/jsonreference/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/jsonreference/SECURITY.md b/vendor/github.com/go-openapi/jsonreference/SECURITY.md index 2a7b6f091..1fea2c573 100644 --- a/vendor/github.com/go-openapi/jsonreference/SECURITY.md +++ b/vendor/github.com/go-openapi/jsonreference/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | 
------------------ | -| 0.22.x | :white_check_mark: | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 6e3ae4995..003ba7a83 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -16,6 +16,7 @@ const ( fragmentRune = `#` ) +// ErrChildURL is raised when there is no child. var ErrChildURL = errors.New("child url is nil") // Ref represents a json reference object. diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index c4b1b64f0..a0a95a96b 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -3,3 +3,5 @@ vendor Godeps .idea *.out +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md index 9322b065e..bac878f21 100644 --- a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at <ivan+abuse@flanders.co.nz>. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md b/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md new file mode 100644 index 000000000..bc76fe820 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md @@ -0,0 +1,36 @@ +# Contributors + +- Repository: ['go-openapi/swag'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 24 | 235 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @fredbi | 105 | | +| @casualjim | 98 | | +| @alexandear | 4 | | +| @orisano | 3 | | +| @reinerRubin | 2 | | +| @n-inja | 2 | | +| @nitinmohan87 | 2 | | +| @Neo2308 | 2 | | +| @michaelbowler-form3 | 2 | | +| @ujjwalsh | 1 | | +| @griffin-stewie | 1 | | +| @POD666 | 1 | | +| @pytlesk4 | 1 | | +| @shirou | 1 | | +| @seanprince | 1 | | +| @petrkotas | 1 | | +| @mszczygiel | 1 | | +| @sosiska | 1 | | +| @kzys | 1 | | +| @faguirre1 | 1 | | +| @posener | 1 | | +| @diego-fu-hs | 1 | | +| @davidalpert | 1 | | +| @Xe | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/swag/README.md 
b/vendor/github.com/go-openapi/swag/README.md index 371fd55fd..834eb2ffb 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,26 +1,60 @@ -# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +# Swag + + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](https://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) +--- -Package `swag` contains a bunch of helper functions for go-openapi and go-swagger projects. +A bunch of helper functions for go-openapi and go-swagger projects. You may also use it standalone for your projects. > **NOTE** > `swag` is one of the foundational building blocks of the go-openapi initiative. +> > Most repositories in `github.com/go-openapi/...` depend on it in some way. 
> And so does our CLI tool `github.com/go-swagger/go-swagger`, > as well as the code generated by this tool. * [Contents](#contents) * [Dependencies](#dependencies) -* [Release Notes](#release-notes) +* [Change log](#change-log) * [Licensing](#licensing) * [Note to contributors](#note-to-contributors) -* [TODOs, suggestions and plans](#todos-suggestions-and-plans) +* [Roadmap](#roadmap) + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/swag/{module} +``` + +Or for backward compatibility: + +```cmd +go get github.com/go-openapi/swag +``` ## Contents @@ -36,7 +70,7 @@ Child modules will continue to evolve and some new ones may be added in the futu | `cmdutils` | utilities to work with CLIs || | `conv` | type conversion utilities | convert between values and pointers for any types
convert from string to builtin types (wraps `strconv`)
require `./typeutils` (test dependency)
| | `fileutils` | file utilities | | -| `jsonname` | JSON utilities | infer JSON names from `go` properties
| +| `jsonname` | JSON utilities | infer JSON names from `go` properties
| | `jsonutils` | JSON utilities | fast json concatenation
read and write JSON from and to dynamic `go` data structures
~require `github.com/mailru/easyjson`~
| | `loading` | file loading | load from file or http
require `./yamlutils`
| | `mangling` | safe name generation | name mangling for `go`
| @@ -49,84 +83,19 @@ Child modules will continue to evolve and some new ones may be added in the futu ## Dependencies -The root module `github.com/go-openapi/swag` at the repo level maintains a few +The root module `github.com/go-openapi/swag` at the repo level maintains a few dependencies outside of the standard library. * YAML utilities depend on `go.yaml.in/yaml/v3` * JSON utilities depend on their registered adapter module: - * by default, only the standard library is used - * `github.com/mailru/easyjson` is now only a dependency for module - `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, - for users willing to import that module. - * integration tests and benchmarks use all the dependencies are published as their own module + * by default, only the standard library is used + * `github.com/mailru/easyjson` is now only a dependency for module + `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, + for users willing to import that module. + * integration tests and benchmarks use all the dependencies are published as their own module * other dependencies are test dependencies drawn from `github.com/stretchr/testify` -## Release notes - -### v0.25.4 - -** mangling** - -Bug fix - -* [x] mangler may panic with pluralized overlapping initialisms - -Tests - -* [x] introduced fuzz tests - -### v0.25.3 - -** mangling** - -Bug fix - -* [x] mangler may panic with pluralized initialisms - -### v0.25.2 - -Minor changes due to internal maintenance that don't affect the behavior of the library. - -* [x] removed indirect test dependencies by switching all tests to `go-openapi/testify`, - a fork of `stretch/testify` with zero-dependencies. -* [x] improvements to CI to catch test reports. -* [x] modernized licensing annotations in source code, using the more compact SPDX annotations - rather than the full license terms. 
-* [x] simplified a bit JSON & YAML testing by using newly available assertions -* started the journey to an OpenSSF score card badge: - * [x] explicited permissions in CI workflows - * [x] published security policy - * pinned dependencies to github actions - * introduced fuzzing in tests - -### v0.25.1 - -* fixes a data race that could occur when using the standard library implementation of a JSON ordered map - -### v0.25.0 - -**New with this release**: - -* requires `go1.24`, as iterators are being introduced -* removes the dependency to `mailru/easyjson` by default (#68) - * functionality remains the same, but performance may somewhat degrade for applications - that relied on `easyjson` - * users of the JSON or YAML utilities who want to use `easyjson` as their preferred JSON serializer library - will be able to do so by registering this the corresponding JSON adapter at runtime. See below. - * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`. - With this release, an implementation relying on the standard `encoding/json` is provided. - * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters -* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59) -* removes the _direct_ dependency to `gopkg.in/yaml.v3` (indirect dependency is still incurred through `stretchr/testify`) (#127) -* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick) - -**What coming next?** - -Moving forward, we want to : -* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. -* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other - similar libraries may be interesting too. - +## Usage **How to explicitly register a dependency at runtime**? @@ -150,90 +119,106 @@ or fallback to the standard library. 
For more details, you may also look at our [integration tests](jsonutils/adapters/testintegration/integration_suite_test.go#29). -### v0.24.0 +--- -With this release, we have largely modernized the API of `swag`: +## Note to contributors -* The traditional `swag` API is still supported: code that imports `swag` will still - compile and work the same. -* A deprecation notice is published to encourage consumers of this library to adopt - the newer API -* **Deprecation notice** - * configuration through global variables is now deprecated, in favor of options passed as parameters - * all helper functions are moved to more specialized packages, which are exposed as - go modules. Importing such a module would reduce the footprint of dependencies. - * _all_ functions, variables, constants exposed by the deprecated API have now moved, so - that consumers of the new API no longer need to import github.com/go-openapi/swag, but - should import the desired sub-module(s). +All kinds of contributions are welcome. -**New with this release**: +This repo is a go mono-repo. See [docs](docs/MAINTAINERS.md). -* [x] type converters and pointer to value helpers now support generic types -* [x] name mangling now support pluralized initialisms (issue #46) - Strings like "contact IDs" are now recognized as such a plural form and mangled as a linter would expect. -* [x] performance: small improvements to reduce the overhead of convert/format wrappers (see issues #110, or PR #108) -* [x] performance: name mangling utilities run ~ 10% faster (PR #115) +More general guidelines are available [here](.github/CONTRIBUTING.md). ---- +## Roadmap -## Licensing +See the current [TODO list](docs/TODOS.md) -This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). +## Change log -## Note to contributors +See -A mono-repo structure comes with some unavoidable extra pains... +For pre-v0.26.0 releases, see [release notes](./docs/NOTES.md). 
-* Testing +**What coming next?** -> The usual `go test ./...` command, run from the root of this repo won't work any longer to test all submodules. -> -> Each module constitutes an independant unit of test. So you have to run `go test` inside each module. -> Or you may take a look at how this is achieved by CI -> [here] https://github.com/go-openapi/swag/blob/master/.github/workflows/go-test.yml). -> -> There are also some alternative tricks using `go work`, for local development, if you feel comfortable with -> go workspaces. Perhaps some day, we'll have a `go work test` to run all tests without any hack. +Moving forward, we want to : -* Releasing +* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. +* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other + similar libraries may be interesting too. -> Each module follows its own independant module versioning. -> -> So you have tags like `mangling/v0.24.0`, `fileutils/v0.24.0` etc that are used by `go mod` and `go get` -> to refer to the tagged version of each module specifically. -> -> This means we may release patches etc to each module independently. -> -> We'd like to adopt the rule that modules in this repo would only differ by a patch version -> (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced. -> -> A script in `./hack` is provided to tag all modules with the same version in one go. + -## Todos, suggestions and plans +## Licensing -All kinds of contributions are welcome. +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
-A few ideas: - -* [x] Complete the split of dependencies to isolate easyjson from the rest -* [x] Improve CI to reduce needed tests -* [x] Replace dependency to `gopkg.in/yaml.v3` (`yamlutil`) -* [ ] Improve mangling utilities (improve readability, support for capitalized words, - better word substitution for non-letter symbols...) -* [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently - (e.g. mangle go package names, search package with go modules support, ...) -* [ ] Apply a similar mono-repo approach to go-openapi/strfmt which suffer from similar woes: bloated API, - imposed dependency to some database driver. -* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API. -* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils` -* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g. similar to `benchvisual`) -* [ ] more thorough testing for nil / null case -* [ ] ci pipeline to manage releases -* [ ] cleaner mockery generation (doesn't work out of the box for all sub-modules) + + + + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/swag/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/swag/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/swag +[vuln-scan-badge]: 
https://github.com/go-openapi/swag/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/swag/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/swag/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/swag/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fswag.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fswag +[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fswag.svg +[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fswag + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/swag +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/swag +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/swag +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/swag + +[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F +[doc-url]: https://goswagger.io/go-openapi +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/swag +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/swag +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/swag/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/swag +[goversion-url]: https://github.com/go-openapi/swag/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/swag 
+[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/swag/latest diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md index d745cdb46..07a2ca1d7 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/README.md +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -1,11 +1,11 @@ - # jsonutils +# jsonutils `jsonutils` exposes a few tools to work with JSON: - a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays - `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure - `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, - with the ability to use another underlying serialization library through an `Adapter` + with the ability to use another underlying serialization library through an `Adapter` configured at runtime - a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained @@ -64,7 +64,7 @@ find a registered implementation that support ordered keys in objects. Our standard library implementation supports this. As of `v0.25.0`, we support through such an adapter the popular `mailru/easyjson` -library, which kicks in when the passed values support the `easyjson.Unmarshaler` +library, which kicks in when the passed values support the `easyjson.Unmarshaler` or `easyjson.Marshaler` interfaces. In the future, we plan to add more similar libraries that compete on the go JSON @@ -77,8 +77,9 @@ In package `github.com/go-openapi/swag/easyjson/adapters`, several adapters are Each adapter is an independent go module. Hence you'll pick its dependencies only if you import it. 
At this moment we provide: -* `stdlib`: JSON adapter based on the standard library -* `easyjson`: JSON adapter based on the `github.com/mailru/easyjson` + +- `stdlib`: JSON adapter based on the standard library +- `easyjson`: JSON adapter based on the `github.com/mailru/easyjson` The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation of the `MapSlice` pattern. diff --git a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md index 6674c63b7..abe6e9533 100644 --- a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md +++ b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md @@ -4,7 +4,7 @@ go test -bench XXX -run XXX -benchtime 30s ``` -## Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df +## Benchmarks at `b3e7a5386f996177e4808f11acb2aa93a0f660df` ``` goos: linux @@ -49,7 +49,7 @@ BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op ``` -## Benchmarks at d7d2d1b895f5b6747afaff312dd2a402e69e818b +## Benchmarks at `d7d2d1b895f5b6747afaff312dd2a402e69e818b` go1.24 diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel index 89cf460d3..46cb26d62 100644 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "cel.go", "decls.go", "env.go", + "fieldpaths.go", "folding.go", "inlining.go", "io.go", @@ -43,6 +44,7 @@ go_library( "//interpreter:go_default_library", "//parser:go_default_library", "@dev_cel_expr//:expr", + "@dev_cel_expr//conformance/proto3:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protodesc:go_default_library", @@ -63,6 +65,7 @@ go_test( "cel_test.go", "decls_test.go", 
"env_test.go", + "fieldpaths_test.go", "folding_test.go", "inlining_test.go", "io_test.go", @@ -78,6 +81,7 @@ go_test( ], embedsrcs = [ "//cel/testdata:prompts", + "//cel/testdata:test_fds_with_source_info", ], deps = [ "//common/operators:go_default_library", @@ -89,6 +93,7 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", + "@com_github_google_go_cmp//cmp:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/prototext:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", @@ -100,4 +105,4 @@ go_test( exports_files( ["templates/authoring.tmpl"], visibility = ["//visibility:public"], -) \ No newline at end of file +) diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index 58819e872..e2de2ff6f 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -18,6 +18,8 @@ import ( "errors" "fmt" "math" + "slices" + "strings" "sync" "github.com/google/cel-go/checker" @@ -139,6 +141,7 @@ type Env struct { provider types.Provider features map[int]bool appliedFeatures map[int]bool + limits map[limitID]int libraries map[string]SingletonLibrary validators []ASTValidator costOptions []checker.CostOption @@ -181,6 +184,16 @@ func (e *Env) ToConfig(name string) (*env.Config, error) { conf.AddImports(env.NewImport(typeName)) } + // Serialize features + for featID, enabled := range e.features { + featName, found := featureNameByID(featID) + if !found { + // If the feature isn't named, it isn't intended to be publicly exposed + continue + } + conf.AddFeatures(env.NewFeature(featName, enabled)) + } + libOverloads := map[string][]string{} for libName, lib := range e.libraries { // Track the options which have been configured by a library and @@ -241,7 +254,7 @@ func (e *Env) ToConfig(name string) (*env.Config, 
error) { fields := e.contextProto.Fields() for i := 0; i < fields.Len(); i++ { field := fields.Get(i) - variable, err := fieldToVariable(field) + variable, err := fieldToVariable(field, e.HasFeature(featureJSONFieldNames)) if err != nil { return nil, fmt.Errorf("could not serialize context field variable %q, reason: %w", field.FullName(), err) } @@ -276,16 +289,45 @@ func (e *Env) ToConfig(name string) (*env.Config, error) { } } - // Serialize features - for featID, enabled := range e.features { - featName, found := featureNameByID(featID) - if !found { - // If the feature isn't named, it isn't intended to be publicly exposed + for id, val := range e.limits { + limitName, found := limitNameByID(id) + if !found || val == 0 { + // skip if explicitly defaulted or not supported in config continue } - conf.AddFeatures(env.NewFeature(featName, enabled)) + conf.AddLimits(env.NewLimit(limitName, val)) } + // Sort repeated fields in config where reasonable to make the export + // stable. + slices.SortFunc(conf.Imports, func(a *env.Import, b *env.Import) int { + return strings.Compare(a.Name, b.Name) + }) + + slices.SortFunc(conf.Extensions, func(a *env.Extension, b *env.Extension) int { + return strings.Compare(a.Name, b.Name) + }) + + slices.SortFunc(conf.Variables, func(a *env.Variable, b *env.Variable) int { + return strings.Compare(a.Name, b.Name) + }) + + slices.SortFunc(conf.Functions, func(a *env.Function, b *env.Function) int { + return strings.Compare(a.Name, b.Name) + }) + + slices.SortFunc(conf.Validators, func(a *env.Validator, b *env.Validator) int { + return strings.Compare(a.Name, b.Name) + }) + + slices.SortFunc(conf.Features, func(a *env.Feature, b *env.Feature) int { + return strings.Compare(a.Name, b.Name) + }) + + slices.SortFunc(conf.Limits, func(a *env.Limit, b *env.Limit) int { + return strings.Compare(a.Name, b.Name) + }) + return conf, nil } @@ -319,7 +361,7 @@ func NewEnv(opts ...EnvOption) (*Env, error) { // See the EnvOption helper functions for 
the options that can be used to configure the // environment. func NewCustomEnv(opts ...EnvOption) (*Env, error) { - registry, err := types.NewRegistry() + registry, err := types.NewProtoRegistry() if err != nil { return nil, err } @@ -333,6 +375,7 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { provider: registry, features: map[int]bool{}, appliedFeatures: map[int]bool{}, + limits: map[limitID]int{}, libraries: map[string]SingletonLibrary{}, validators: []ASTValidator{}, progOpts: []ProgramOption{}, @@ -497,6 +540,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { for k, v := range e.appliedFeatures { appliedFeaturesCopy[k] = v } + limitsCopy := make(map[limitID]int, len(e.limits)) + for k, v := range e.limits { + limitsCopy[k] = v + } funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions)) for k, v := range e.functions { funcsCopy[k] = v @@ -507,6 +554,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { } validatorsCopy := make([]ASTValidator, len(e.validators)) copy(validatorsCopy, e.validators) + costOptsCopy := make([]checker.CostOption, len(e.costOptions)) copy(costOptsCopy, e.costOptions) @@ -519,6 +567,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { progOpts: progOptsCopy, adapter: adapter, features: featuresCopy, + limits: limitsCopy, appliedFeatures: appliedFeaturesCopy, libraries: libsCopy, validators: validatorsCopy, @@ -785,11 +834,32 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { if e.HasFeature(featureIdentEscapeSyntax) { prsrOpts = append(prsrOpts, parser.EnableIdentEscapeSyntax(true)) } + if l := e.limits[limitParseErrorRecovery]; l != 0 { + prsrOpts = append(prsrOpts, parser.ErrorRecoveryLimit(l)) + } + if l := e.limits[limitCodePointSize]; l != 0 { + prsrOpts = append(prsrOpts, parser.ExpressionSizeCodePointLimit(l)) + } + if l := e.limits[limitParseRecursionDepth]; l != 0 { + prsrOpts = append(prsrOpts, parser.MaxRecursionDepth(l)) + } e.prsr, err = parser.NewParser(prsrOpts...) 
if err != nil { return nil, err } + // Enable JSON field names is using a proto-based *types.Registry + if e.HasFeature(featureJSONFieldNames) { + reg, isReg := e.provider.(*types.Registry) + if !isReg { + return nil, fmt.Errorf("JSONFieldNames() option is only compatible with *types.Registry providers") + } + err := reg.WithJSONFieldNames(true) + if err != nil { + return nil, err + } + } + // Ensure that the checker init happens eagerly rather than lazily. if e.HasFeature(featureEagerlyValidateDeclarations) { _, err := e.initChecker() @@ -808,6 +878,8 @@ func (e *Env) initChecker() (*checker.Env, error) { chkOpts = append(chkOpts, checker.CrossTypeNumericComparisons( e.HasFeature(featureCrossTypeNumericComparisons))) + chkOpts = append(chkOpts, + checker.JSONFieldNames(e.HasFeature(featureJSONFieldNames))) ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...) if err != nil { @@ -877,6 +949,16 @@ type Issues struct { info *celast.SourceInfo } +// ErrorAsIssues wraps a Golang error into a CEL common error and issue set. +// +// This is a convenience method for early returning from an expression validation call path due to +// internal state or configuration which is unrelated to the source being validated. +func ErrorAsIssues(err error) *Issues { + errs := common.NewErrors(common.NewTextSource("")) + errs.ReportErrorString(common.NoLocation, err.Error()) + return NewIssues(errs) +} + // NewIssues returns an Issues struct from a common.Errors object. 
func NewIssues(errs *common.Errors) *Issues { return NewIssuesWithSourceInfo(errs, nil) @@ -985,9 +1067,10 @@ func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName strin return nil, false } return &types.FieldType{ - Type: t, - IsSet: ft.IsSet, - GetFrom: ft.GetFrom, + Type: t, + IsSet: ft.IsSet, + GetFrom: ft.GetFrom, + IsJSONField: ft.IsJSONField, }, true } return nil, false diff --git a/vendor/github.com/google/cel-go/cel/fieldpaths.go b/vendor/github.com/google/cel-go/cel/fieldpaths.go new file mode 100644 index 000000000..570fce3a4 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/fieldpaths.go @@ -0,0 +1,163 @@ +package cel + +import ( + "slices" + "strings" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/types" +) + +// fieldPath represents a selection path to a field from a variable in a CEL environment. +type fieldPath struct { + celType *Type + // path represents the selection path to the field. + path string + description string + isLeaf bool +} + +// Documentation implements the Documentor interface. +func (f *fieldPath) Documentation() *common.Doc { + return common.NewFieldDoc(f.path, f.celType.String(), f.description) +} + +type documentationProvider interface { + // FindStructFieldDescription returns documentation for a field if available. + // Returns false if the field could not be found. + FindStructFieldDescription(typeName, fieldName string) (string, bool) +} + +type backtrack struct { + // provider used to resolve types. + provider types.Provider + // paths of fields that have been visited along the path. + path []string + // types of fields that have been visited along the path. used to avoid cycles. 
+ types []*Type +} + +func (b *backtrack) push(pathStep string, celType *Type) { + b.path = append(b.path, pathStep) + b.types = append(b.types, celType) +} + +func (b *backtrack) pop() { + b.path = b.path[:len(b.path)-1] + b.types = b.types[:len(b.types)-1] +} + +func formatPath(path []string) string { + var buffer strings.Builder + for i, p := range path { + if i == 0 { + buffer.WriteString(p) + continue + } + if strings.HasPrefix(p, "[") { + buffer.WriteString(p) + continue + } + buffer.WriteString(".") + buffer.WriteString(p) + } + return buffer.String() +} + +func (b *backtrack) expandFieldPaths(celType *Type, paths []*fieldPath) []*fieldPath { + if slices.ContainsFunc(b.types[:len(b.types)-1], func(t *Type) bool { return t.String() == celType.String() }) { + // Cycle detected, so stop expanding. + paths[len(paths)-1].isLeaf = false + return paths + } + switch celType.Kind() { + case types.StructKind: + fields, ok := b.provider.FindStructFieldNames(celType.String()) + if !ok { + // Caller added this type to the path, so it must be a leaf. + paths[len(paths)-1].isLeaf = true + return paths + } + for _, field := range fields { + fieldType, ok := b.provider.FindStructFieldType(celType.String(), field) + if !ok { + // Field not found, either hidden or an error. + continue + } + b.push(field, celType) + description := "" + if docProvider, ok := b.provider.(documentationProvider); ok { + description, _ = docProvider.FindStructFieldDescription(celType.String(), field) + } + path := &fieldPath{ + celType: fieldType.Type, + path: formatPath(b.path), + description: description, + isLeaf: false, + } + paths = append(paths, path) + paths = b.expandFieldPaths(fieldType.Type, paths) + b.pop() + } + return paths + case types.MapKind: + if len(celType.Parameters()) != 2 { + // dynamic map, so treat as a leaf. 
+ paths[len(paths)-1].isLeaf = true + return paths + } + mapKeyType := celType.Parameters()[0] + mapValueType := celType.Parameters()[1] + // Add a placeholder for the map key kind (the zero value). + keyIdentifier := "" + switch mapKeyType.Kind() { + case types.StringKind: + keyIdentifier = "[\"\"]" + case types.IntKind: + keyIdentifier = "[0]" + case types.UintKind: + keyIdentifier = "[0u]" + case types.BoolKind: + keyIdentifier = "[false]" + default: + // Caller added this type to the path, so it must be a leaf. + paths[len(paths)-1].isLeaf = true + return paths + } + b.push(keyIdentifier, mapValueType) + defer b.pop() + return b.expandFieldPaths(mapValueType, paths) + case types.ListKind: + if len(celType.Parameters()) != 1 { + // dynamic list, so treat as a leaf. + paths[len(paths)-1].isLeaf = true + return paths + } + listElemType := celType.Parameters()[0] + b.push("[0]", listElemType) + defer b.pop() + return b.expandFieldPaths(listElemType, paths) + default: + paths[len(paths)-1].isLeaf = true + } + + return paths +} + +// fieldPathsForType expands the reachable fields from the given root identifier. 
+func fieldPathsForType(provider types.Provider, identifier string, celType *Type) []*fieldPath { + b := &backtrack{ + provider: provider, + path: []string{identifier}, + types: []*Type{celType}, + } + paths := []*fieldPath{ + { + celType: celType, + path: identifier, + isLeaf: false, + }, + } + + return b.expandFieldPaths(celType, paths) +} diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go index a4530e19e..d9a5e89a5 100644 --- a/vendor/github.com/google/cel-go/cel/inlining.go +++ b/vendor/github.com/google/cel-go/cel/inlining.go @@ -178,9 +178,38 @@ func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, i )) return } + if zeroValExpr, ok := zeroValueExpr(ctx, inlinedType); ok { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + inlined, zeroValExpr)) + return + } ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType) } +// zeroValueExpr creates an expression representing the empty or zero value for the given type +// Note: bytes, lists, maps, and strings are supported via the `SizerType` trait. 
+func zeroValueExpr(ctx *OptimizerContext, t *Type) (ast.Expr, bool) { + // Note: bytes, strings, lists, and maps are covered by the "sizer-type" check + switch t.Kind() { + case types.BoolKind: + return ctx.NewLiteral(types.False), true + case types.DoubleKind: + return ctx.NewLiteral(types.Double(0)), true + case types.DurationKind: + return ctx.NewCall(overloads.TypeConvertDuration, ctx.NewLiteral(types.String("0s"))), true + case types.IntKind: + return ctx.NewLiteral(types.IntZero), true + case types.TimestampKind: + return ctx.NewCall(overloads.TypeConvertTimestamp, ctx.NewLiteral(types.Int(0))), true + case types.StructKind: + return ctx.NewStruct(t.TypeName(), []ast.EntryExpr{}), true + case types.UintKind: + return ctx.NewLiteral(types.Uint(0)), true + } + return nil, false +} + // isBindable indicates whether the inlined type can be used within a cel.bind() if the expression // being replaced occurs within a presence test. Value types with a size() method or field selection // support can be bound. @@ -212,17 +241,43 @@ func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type // field selection. This may be a future refinement. func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher { return func(e ast.NavigableExpr) bool { - if e.Kind() == ast.IdentKind && e.AsIdent() == varName { - return true + name, found := maybeAsVariableName(e) + if !found || name != varName { + return false + } + + // Determine whether the variable being referenced has been shadowed by a comprehension + p, hasParent := e.Parent() + for hasParent { + if p.Kind() != ast.ComprehensionKind { + p, hasParent = p.Parent() + continue + } + // If the inline variable name matches any of the comprehension variables at any scope, + // return false as the variable has been shadowed. 
+ compre := p.AsComprehension() + if varName == compre.AccuVar() || varName == compre.IterVar() || varName == compre.IterVar2() { + return false + } + p, hasParent = p.Parent() } - if e.Kind() == ast.SelectKind { - sel := e.AsSelect() - // While the `ToQualifiedName` call could take the select directly, this - // would skip presence tests from possible matches, which we would like - // to include. - qualName, found := containers.ToQualifiedName(sel.Operand()) - return found && qualName+"."+sel.FieldName() == varName + + return true + } +} + +func maybeAsVariableName(e ast.NavigableExpr) (string, bool) { + if e.Kind() == ast.IdentKind { + return e.AsIdent(), true + } + if e.Kind() == ast.SelectKind { + sel := e.AsSelect() + // While the `ToQualifiedName` call could take the select directly, this + // would skip presence tests from possible matches, which we would like + // to include. + if qualName, found := containers.ToQualifiedName(sel.Operand()); found { + return qualName + "." + sel.FieldName(), true } - return false } + return "", false } diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index bc13add89..3c8b6ba34 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -734,14 +734,17 @@ func (opt *evalOptionalOr) ID() int64 { func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val { // short-circuit lhs. optLHS := opt.lhs.Eval(ctx) - optVal, ok := optLHS.(*types.Optional) - if !ok { + switch val := optLHS.(type) { + case *types.Err, *types.Unknown: return optLHS + case *types.Optional: + if val.HasValue() { + return optLHS + } + return opt.rhs.Eval(ctx) + default: + return types.NoSuchOverloadErr() } - if optVal.HasValue() { - return optVal - } - return opt.rhs.Eval(ctx) } // evalOptionalOrValue selects between an optional or a concrete value. 
If the optional has a value, @@ -762,14 +765,18 @@ func (opt *evalOptionalOrValue) ID() int64 { func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val { // short-circuit lhs. optLHS := opt.lhs.Eval(ctx) - optVal, ok := optLHS.(*types.Optional) - if !ok { + + switch val := optLHS.(type) { + case *types.Err, *types.Unknown: return optLHS + case *types.Optional: + if val.HasValue() { + return val.GetValue() + } + return opt.rhs.Eval(ctx) + default: + return types.NoSuchOverloadErr() } - if optVal.HasValue() { - return optVal.GetValue() - } - return opt.rhs.Eval(ctx) } type timeLegacyLibrary struct{} diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index fee67323c..d7d2ab034 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -71,12 +71,16 @@ const ( // Enable escape syntax for field identifiers (`). featureIdentEscapeSyntax + + // Enable accessing fields by JSON names within protobuf messages + featureJSONFieldNames ) var featureIDsToNames = map[int]string{ featureEnableMacroCallTracking: "cel.feature.macro_call_tracking", featureCrossTypeNumericComparisons: "cel.feature.cross_type_numeric_comparisons", featureIdentEscapeSyntax: "cel.feature.backtick_escape_syntax", + featureJSONFieldNames: "cel.feature.json_field_names", } func featureNameByID(id int) (string, bool) { @@ -93,6 +97,40 @@ func featureIDByName(name string) (int, bool) { return 0, false } +// limitID is used as a key for configurable limits. These are options that +// support exporting to YAML environment config. +type limitID int + +const ( + _ = limitID(iota) + // The number of recursive calls permitted in parsing. + limitParseRecursionDepth + // The number of code points permitted in an input expression string. + limitCodePointSize + // The number of attempts to recover from a parse error. 
+ limitParseErrorRecovery +) + +var limitIDsToNames = map[limitID]string{ + limitCodePointSize: "cel.limit.expression_code_points", + limitParseErrorRecovery: "cel.limit.parse_error_recovery", + limitParseRecursionDepth: "cel.limit.parse_recursion_depth", +} + +func limitNameByID(id limitID) (string, bool) { + v, ok := limitIDsToNames[id] + return v, ok +} + +func limitIDByName(name string) (limitID, bool) { + for k, v := range limitIDsToNames { + if v == name { + return k, true + } + } + return limitID(0), false +} + // EnvOption is a functional interface for configuring the environment. type EnvOption func(e *Env) (*Env, error) @@ -275,9 +313,9 @@ func Abbrevs(qualifiedNames ...string) EnvOption { } } -// customTypeRegistry is an internal-only interface containing the minimum methods required to support +// protoTypeRegistry is an internal-only interface containing the minimum methods required to support // custom types. It is a subset of methods from ref.TypeRegistry. -type customTypeRegistry interface { +type protoTypeRegistry interface { RegisterDescriptor(protoreflect.FileDescriptor) error RegisterType(...ref.Type) error } @@ -294,7 +332,7 @@ type customTypeRegistry interface { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(customTypeRegistry) + reg, isReg := e.provider.(protoTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -331,7 +369,7 @@ func Types(addTypes ...any) EnvOption { // extension or by re-using the same EnvOption with another NewEnv() call. 
func TypeDescs(descs ...any) EnvOption { return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(customTypeRegistry) + reg, isReg := e.provider.(protoTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -379,7 +417,7 @@ func TypeDescs(descs ...any) EnvOption { } } -func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { +func registerFileSet(reg protoTypeRegistry, fileSet *descpb.FileDescriptorSet) error { files, err := protodesc.NewFiles(fileSet) if err != nil { return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) @@ -387,7 +425,7 @@ func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) return registerFiles(reg, files) } -func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { +func registerFiles(reg protoTypeRegistry, files *protoregistry.Files) error { var err error files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { err = reg.RegisterDescriptor(fd) @@ -396,6 +434,15 @@ func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { return err } +// JSONFieldNames supports accessing protocol buffer fields by json-name. +// +// Enabling JSON field name support will create a copy of the types.Registry with fields indexed +// by JSON name, and whether JSON name or Proto-style names are supported will be inferred from +// the AST extensions metadata. +func JSONFieldNames(enabled bool) EnvOption { + return features(featureJSONFieldNames, enabled) +} + // ProgramOption is a functional interface for configuring evaluation bindings and behaviors. type ProgramOption func(p *prog) (*prog, error) @@ -523,6 +570,17 @@ func configToEnvOptions(config *env.Config, provider types.Provider, optFactorie envOpts = append(envOpts, Abbrevs(imp.Name)) } + // Configure features and common limits. 
+ for _, feat := range config.Features { + // Note, if a feature is not found, it is skipped as it is possible the feature + // is not intended to be supported publicly. In the future, a refinement of + // to this strategy to report unrecognized features and validators should probably + // be covered as a standard ConfigOptionFactory + if id, found := featureIDByName(feat.Name); found { + envOpts = append(envOpts, features(id, feat.Enabled)) + } + } + // Configure the context variable declaration if config.ContextVariable != nil { typeName := config.ContextVariable.TypeName @@ -564,14 +622,9 @@ func configToEnvOptions(config *env.Config, provider types.Provider, optFactorie envOpts = append(envOpts, FunctionDecls(funcs...)) } - // Configure features - for _, feat := range config.Features { - // Note, if a feature is not found, it is skipped as it is possible the feature - // is not intended to be supported publicly. In the future, a refinement of - // to this strategy to report unrecognized features and validators should probably - // be covered as a standard ConfigOptionFactory - if id, found := featureIDByName(feat.Name); found { - envOpts = append(envOpts, features(id, feat.Enabled)) + for _, limit := range config.Limits { + if id, found := limitIDByName(limit.Name); found { + envOpts = append(envOpts, setLimit(id, limit.Value)) } } @@ -727,8 +780,11 @@ func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) { return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String()) } -func fieldToVariable(field protoreflect.FieldDescriptor) (*decls.VariableDecl, error) { +func fieldToVariable(field protoreflect.FieldDescriptor, jsonFieldNames bool) (*decls.VariableDecl, error) { name := string(field.Name()) + if jsonFieldNames { + name = field.JSONName() + } if field.IsMap() { mapKey := field.MapKey() mapValue := field.MapValue() @@ -759,6 +815,8 @@ func fieldToVariable(field protoreflect.FieldDescriptor) 
(*decls.VariableDecl, e // DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto. // Each field of the proto defines a variable of the same name in the environment. // https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment +// +// If using JSONFieldNames(), ensure that the option is set before DeclareContextProto is provided. func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption { return func(e *Env) (*Env, error) { if e.contextProto != nil { @@ -768,9 +826,10 @@ func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption { e.contextProto = descriptor fields := descriptor.Fields() vars := make([]*decls.VariableDecl, 0, fields.Len()) + jsonFieldNames := e.HasFeature(featureJSONFieldNames) for i := 0; i < fields.Len(); i++ { field := fields.Get(i) - variable, err := fieldToVariable(field) + variable, err := fieldToVariable(field, jsonFieldNames) if err != nil { return nil, err } @@ -789,11 +848,15 @@ func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption { // // Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using // protocol buffers. -func ContextProtoVars(ctx proto.Message) (Activation, error) { +// +// Use the types.JSONFieldNames(true) option to populate the context proto vars using the JSON field names. +func ContextProtoVars(ctx proto.Message, opts ...types.RegistryOption) (Activation, error) { if ctx == nil || !ctx.ProtoReflect().IsValid() { return interpreter.EmptyActivation(), nil } - reg, err := types.NewRegistry(ctx) + regOpts := []types.RegistryOption{types.ProtoTypeDefs(ctx)} + regOpts = append(regOpts, opts...) + reg, err := types.NewProtoRegistry(regOpts...) 
if err != nil { return nil, err } @@ -803,15 +866,19 @@ func ContextProtoVars(ctx proto.Message) (Activation, error) { vars := make(map[string]any, fields.Len()) for i := 0; i < fields.Len(); i++ { field := fields.Get(i) - sft, found := reg.FindStructFieldType(typeName, field.TextName()) + fieldName := field.TextName() + if reg.JSONFieldNames() { + fieldName = field.JSONName() + } + sft, found := reg.FindStructFieldType(typeName, fieldName) if !found { - return nil, fmt.Errorf("no such field: %s", field.TextName()) + return nil, fmt.Errorf("no such field: %s", fieldName) } fieldVal, err := sft.GetFrom(ctx) if err != nil { return nil, err } - vars[field.TextName()] = fieldVal + vars[fieldName] = fieldVal } return NewActivation(vars) } @@ -847,22 +914,32 @@ func features(flag int, enabled bool) EnvOption { } } -// ParserRecursionLimit adjusts the AST depth the parser will tolerate. -// Defaults defined in the parser package. -func ParserRecursionLimit(limit int) EnvOption { +func setLimit(id limitID, limit int) EnvOption { + if limit < 0 { + limit = -1 + } return func(e *Env) (*Env, error) { - e.prsrOpts = append(e.prsrOpts, parser.MaxRecursionDepth(limit)) + e.limits[id] = limit return e, nil } } -// ParserExpressionSizeLimit adjusts the number of code points the expression parser is allowed to parse. +// ParserRecursionLimit adjusts the AST depth the parser will tolerate. // Defaults defined in the parser package. +func ParserRecursionLimit(limit int) EnvOption { + return setLimit(limitParseRecursionDepth, limit) +} + +// ParserErrorRecoveryLimit sets the number of attemtps the parser will take +// to recover after encountering an error. +func ParserErrorRecoveryLimit(limit int) EnvOption { + return setLimit(limitParseErrorRecovery, limit) +} + +// ParserExpressionSizeLimit adjusts the number of code points the expression parser is allowed to parse. +// Defaults are defined in the parser package. A negative value means unbounded. 
func ParserExpressionSizeLimit(limit int) EnvOption { - return func(e *Env) (*Env, error) { - e.prsrOpts = append(e.prsrOpts, parser.ExpressionSizeCodePointLimit(limit)) - return e, nil - } + return setLimit(limitCodePointSize, limit) } // EnableHiddenAccumulatorName sets the parser to use the identifier '@result' for accumulators diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index ec3869bdb..c46d694e4 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -16,6 +16,7 @@ package cel import ( "context" + "errors" "fmt" "sync" @@ -218,6 +219,12 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { attrFactorOpts := []interpreter.AttrFactoryOption{ interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)), } + if a.SourceInfo().HasExtension("json_name", ast.NewExtensionVersion(1, 1)) { + if !e.HasFeature(featureJSONFieldNames) { + return nil, errors.New("the AST extension 'json_name' requires the option cel.JSONFieldNames(true)") + } + } + // Configure the type provider, considering whether the AST indicates whether it supports JSON field names if p.evalOpts&OptPartialEval == OptPartialEval { attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) 
} else { @@ -361,7 +368,11 @@ func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetail default: return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input) } - return p.Eval(vars) + out, det, err := p.Eval(vars) + if err != nil && errors.Is(err, interpreter.InterruptError{}) { + return out, det, context.Cause(ctx) + } + return out, det, err } type ctxEvalActivation struct { diff --git a/vendor/github.com/google/cel-go/cel/prompt.go b/vendor/github.com/google/cel-go/cel/prompt.go index 929a26f91..1529680fd 100644 --- a/vendor/github.com/google/cel-go/cel/prompt.go +++ b/vendor/github.com/google/cel-go/cel/prompt.go @@ -23,15 +23,48 @@ import ( "github.com/google/cel-go/common" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" ) //go:embed templates/authoring.tmpl var authoringPrompt string +// splitImpl splits a string into a list of strings. +// +// Normalizes extracted comments (trim common prefix whitespace and extra trailing newlines). +func splitImpl(str string) []string { + str = strings.TrimRight(str, " \n\t\r") + out := strings.Split(str, "\n") + if len(out) == 0 { + return nil + } + negative := strings.TrimLeft(out[0], " \t") + lenNegative := len(negative) + lenOut := len(out[0]) + if lenNegative == lenOut { + return out + } + prefix := out[0][:lenOut-lenNegative] + trimmed := make([]string, len(out)) + for i, line := range out { + if line == "" { + trimmed[i] = "" + continue + } + if !strings.HasPrefix(line, prefix) { + return out + } + trimmed[i] = strings.TrimPrefix(line, prefix) + } + + return trimmed +} + // AuthoringPrompt creates a prompt template from a CEL environment for the purpose of AI-assisted authoring. 
func AuthoringPrompt(env *Env) (*Prompt, error) { funcMap := template.FuncMap{ - "split": func(str string) []string { return strings.Split(str, "\n") }, + "split": splitImpl, + "newlineToSpace": func(str string) string { return strings.ReplaceAll(str, "\n", " ") }, } tmpl := template.New("cel").Funcs(funcMap) tmpl, err := tmpl.Parse(authoringPrompt) @@ -47,6 +80,17 @@ func AuthoringPrompt(env *Env) (*Prompt, error) { }, nil } +// AuthoringPromptWithFieldPaths creates a prompt template from a CEL environment for the purpose of AI-assisted authoring. +// Includes documentation for all of the reachable field paths in the environment. +func AuthoringPromptWithFieldPaths(env *Env) (*Prompt, error) { + p, err := AuthoringPrompt(env) + if err != nil { + return nil, err + } + p.fieldPaths = true + return p, nil +} + // Prompt represents the core components of an LLM prompt based on a CEL environment. // // All fields of the prompt may be overwritten / modified with support for rendering the @@ -64,14 +108,22 @@ type Prompt struct { // tmpl is the text template base-configuration for rendering text. tmpl *template.Template + // fieldPaths is a flag to enable including reachable field paths in the prompt. + fieldPaths bool + // env reference used to collect variables, functions, and macros available to the prompt. env *Env } +type promptVariable struct { + *common.Doc + FieldPaths []*common.Doc +} + type promptInst struct { *Prompt - Variables []*common.Doc + Variables []*promptVariable Macros []*common.Doc Functions []*common.Doc UserPrompt string @@ -81,9 +133,28 @@ type promptInst struct { // for use with LLM generators. 
func (p *Prompt) Render(userPrompt string) string { var buffer strings.Builder - vars := make([]*common.Doc, len(p.env.Variables())) + vars := make([]*promptVariable, len(p.env.Variables())) for i, v := range p.env.Variables() { - vars[i] = v.Documentation() + vars[i] = &promptVariable{Doc: v.Documentation()} + if p.fieldPaths && v.Type().Kind() == types.StructKind { + var fieldPaths []*common.Doc + + paths := fieldPathsForType(p.env.CELTypeProvider(), v.Name(), v.Type()) + if len(paths) < 2 { + paths = nil + } else { + // First path is the variable which is already documented. + paths = paths[1:] + } + for _, path := range paths { + fieldPaths = append(fieldPaths, path.Documentation()) + } + + sort.SliceStable(fieldPaths, func(i, j int) bool { + return fieldPaths[i].Name < fieldPaths[j].Name + }) + vars[i].FieldPaths = fieldPaths + } } sort.SliceStable(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name diff --git a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl index d0b0133f1..a921df9b0 100644 --- a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl +++ b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl @@ -1,12 +1,29 @@ -{{define "variable"}}{{.Name}} is a {{.Type}}{{if .Description}} - -{{range split .Description}} {{.}} +{{define "fieldPath" }} + * path: `{{.Name}}` + type: `{{.Type}}` + {{- if .Description }} + description: +{{range split .Description }} {{.}} {{end}} {{- end -}} {{- end -}} +{{define "variable" -}} +* name: `{{.Name}}` + type: `{{.Type}}` + {{- if .Description}} + description: +{{range split .Description}} {{.}} +{{end -}} +{{- end -}} +{{- if .FieldPaths }} + attributes: +{{- range .FieldPaths }}{{ template "fieldPath" . 
}}{{end}} +{{- end -}} +{{- end -}} + {{define "macro" -}} -{{.Name}} macro{{if .Description}} - {{range split .Description}}{{.}} {{end}} +{{.Name}} macro{{if .Description}} - {{newlineToSpace .Description}} {{end}} {{range .Children}}{{range split .Description}} {{.}} {{end}} @@ -22,7 +39,7 @@ {{- end -}} {{define "function" -}} -{{.Name}}{{if .Description}} - {{range split .Description}}{{.}} {{end}} +{{.Name}}{{if .Description}} - {{newlineToSpace .Description}} {{end}} {{range .Children}}{{template "overload" .}}{{end}} {{- end -}} @@ -36,25 +53,26 @@ Only use the following variables, macros, and functions in expressions. {{if .Variables}} Variables: -{{range .Variables}}* {{template "variable" .}} +{{range .Variables -}} +{{template "variable" .}} {{end -}} +{{- end -}} -{{end -}} {{if .Macros}} Macros: {{range .Macros}}* {{template "macro" .}} {{end -}} - {{end -}} + {{if .Functions}} Functions: {{range .Functions}}* {{template "function" .}} {{end -}} - -{{end -}} {{- end -}} +{{- end -}} + {{.GeneralUsage}} {{.UserPrompt}} diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index d07d8e799..42d27a428 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -71,6 +71,11 @@ func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.E // check() deletes some nodes while rewriting the AST. For example the Select operand is // deleted when a variable reference is replaced with a Ident expression. 
c.AST.ClearUnusedIDs() + if env.jsonFieldNames { + c.AST.SourceInfo().AddExtension( + ast.NewExtension("json_name", ast.NewExtensionVersion(1, 1), ast.ComponentRuntime), + ) + } return c.AST, errs } @@ -718,6 +723,9 @@ func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (* } if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found { + if c.env.jsonFieldNames && !ft.IsJSONField { + c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName) + } return ft.Type, found } diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go index 6d991eba1..477918c48 100644 --- a/vendor/github.com/google/cel-go/checker/env.go +++ b/vendor/github.com/google/cel-go/checker/env.go @@ -74,6 +74,7 @@ type Env struct { declarations *Scopes aggLitElemType aggregateLiteralElementType filteredOverloadIDs map[string]struct{} + jsonFieldNames bool } // NewEnv returns a new *Env with the given parameters. @@ -104,6 +105,7 @@ func NewEnv(container *containers.Container, provider types.Provider, opts ...Op declarations: declarations, aggLitElemType: aggLitElemType, filteredOverloadIDs: filteredOverloadIDs, + jsonFieldNames: envOptions.jsonFieldNames, }, nil } @@ -273,12 +275,31 @@ func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg { return errMsgs } +func maybeMergeConstant(a *decls.VariableDecl, b *decls.VariableDecl) (*decls.VariableDecl, errorMsg) { + if b.Value() != nil { + if a.Value() == nil { + return b, "" + } + eq, ok := a.Value().Equal(b.Value()).Value().(bool) + if ok && eq { + return a, "" + } + return nil, constantConflictError(b.Name()) + } + return a, "" +} + // addIdent adds the Decl to the declarations in the Env. // Returns a non-empty errorMsg if the identifier is already declared in the scope. 
func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg { current := e.declarations.FindIdentInScope(decl.Name()) if current != nil { if current.DeclarationIsEquivalent(decl) { + decl, errMsg := maybeMergeConstant(current, decl) + if errMsg != "" { + return errMsg + } + e.declarations.AddIdent(decl) return "" } return overlappingIdentifierError(decl.Name()) @@ -325,6 +346,10 @@ func (e *Env) exitScope() *Env { // may be accumulated into an error at a later point in execution. type errorMsg string +func constantConflictError(name string) errorMsg { + return errorMsg(fmt.Sprintf("conflicting constant definitions for name '%s'", name)) +} + func overlappingIdentifierError(name string) errorMsg { return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) } diff --git a/vendor/github.com/google/cel-go/checker/options.go b/vendor/github.com/google/cel-go/checker/options.go index 0560c3813..af714323b 100644 --- a/vendor/github.com/google/cel-go/checker/options.go +++ b/vendor/github.com/google/cel-go/checker/options.go @@ -18,6 +18,7 @@ type options struct { crossTypeNumericComparisons bool homogeneousAggregateLiterals bool validatedDeclarations *Scopes + jsonFieldNames bool } // Option is a functional option for configuring the type-checker @@ -40,3 +41,11 @@ func ValidatedDeclarations(env *Env) Option { return nil } } + +// JSONFieldNames enables the use of json names instead of the standard protobuf snake_case field names +func JSONFieldNames(enabled bool) Option { + return func(opts *options) error { + opts.jsonFieldNames = enabled + return nil + } +} diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go index 3c5ee0c80..3ae2e1063 100644 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -231,6 +231,11 @@ func CopySourceInfo(info *SourceInfo) *SourceInfo { for id, call := range info.macroCalls { callsCopy[id] = 
defaultFactory.CopyExpr(call) } + var extCopy []Extension + if len(info.extensions) > 0 { + extCopy = make([]Extension, len(info.extensions)) + copy(extCopy, info.extensions) + } return &SourceInfo{ syntax: info.syntax, desc: info.desc, @@ -239,6 +244,7 @@ func CopySourceInfo(info *SourceInfo) *SourceInfo { baseCol: info.baseCol, offsetRanges: rangesCopy, macroCalls: callsCopy, + extensions: extCopy, } } @@ -252,6 +258,9 @@ type SourceInfo struct { baseCol int32 offsetRanges map[int64]OffsetRange macroCalls map[int64]Expr + + // extensions indicate versioned optional features which affect the execution of one or more CEL component. + extensions []Extension } // RenumberIDs performs an in-place update of the expression IDs within the SourceInfo. @@ -420,6 +429,34 @@ func (s *SourceInfo) ComputeOffsetAbsolute(line, col int32) int32 { return offset + col } +// Extensions returns the set of extensions present in the source. +func (s *SourceInfo) Extensions() []Extension { + var extensions []Extension + if s == nil { + return extensions + } + return s.extensions +} + +// HasExtension returns whether the source info contains the extension which satisfies the minimum version requirement. +// +// For an extension to be considered 'present' it must have the same major version as the minVersion and a minor version +// at least as great as the lowest minor version specified. +func (s *SourceInfo) HasExtension(id string, minVersion ExtensionVersion) bool { + for _, ext := range s.Extensions() { + return ext.ID == id && ext.Version.Major == minVersion.Major && ext.Version.Minor >= minVersion.Minor + } + return false +} + +// AddExtension adds an extension record into the SourceInfo. +func (s *SourceInfo) AddExtension(ext Extension) { + if s == nil { + return + } + s.extensions = append(s.extensions, ext) +} + // OffsetRange captures the start and stop positions of a section of text in the input expression. 
type OffsetRange struct { Start int32 @@ -489,6 +526,53 @@ func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { return true } +// NewExtension creates an Extension to be recorded on the SourceInfo. +func NewExtension(id string, version ExtensionVersion, components ...ExtensionComponent) Extension { + return Extension{ + ID: id, + Version: version, + Components: components, + } +} + +// Extension represents a versioned, optional feature present in the AST that affects CEL component behavior. +type Extension struct { + // ID indicates the unique name of the extension. + ID string + // Version indicates the major / minor version. + Version ExtensionVersion + // Components enumerates the CEL components affected by the feature. + Components []ExtensionComponent +} + +// NewExtensionVersion creates a new extension version with a major, minor version. +func NewExtensionVersion(major, minor int64) ExtensionVersion { + return ExtensionVersion{Major: major, Minor: minor} +} + +// ExtensionVersion represents a semantic version with a major and minor number. +type ExtensionVersion struct { + // Major version of the extension. + // All versions with the same major number are expected to be compatible with all minor version changes. + Major int64 + + // Minor version of the extension which indicates that some small non-semantic change has been made to + // the extension. + Minor int64 +} + +// ExtensionComponent indicates which CEL component is affected. +type ExtensionComponent int + +const ( + // ComponentParser means the feature affects expression parsing. + ComponentParser ExtensionComponent = iota + 1 + // ComponentTypeChecker means the feature affects type-checking. + ComponentTypeChecker + // ComponentRuntime alters program planning or evaluation of the AST. 
+ ComponentRuntime +) + type maxIDVisitor struct { maxID int64 *baseVisitor diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go index 435d8f654..380f8c118 100644 --- a/vendor/github.com/google/cel-go/common/ast/conversion.go +++ b/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -27,6 +27,19 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" ) +var ( + pbComponentMap = map[exprpb.SourceInfo_Extension_Component]ExtensionComponent{ + exprpb.SourceInfo_Extension_COMPONENT_PARSER: ComponentParser, + exprpb.SourceInfo_Extension_COMPONENT_TYPE_CHECKER: ComponentTypeChecker, + exprpb.SourceInfo_Extension_COMPONENT_RUNTIME: ComponentRuntime, + } + componentPBMap = map[ExtensionComponent]exprpb.SourceInfo_Extension_Component{ + ComponentParser: exprpb.SourceInfo_Extension_COMPONENT_PARSER, + ComponentTypeChecker: exprpb.SourceInfo_Extension_COMPONENT_TYPE_CHECKER, + ComponentRuntime: exprpb.SourceInfo_Extension_COMPONENT_RUNTIME, + } +) + // ToProto converts an AST to a CheckedExpr protobouf. 
func ToProto(ast *AST) (*exprpb.CheckedExpr, error) { refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap())) @@ -534,6 +547,25 @@ func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) { } sourceInfo.MacroCalls[id] = call } + for _, ext := range info.Extensions() { + var components []exprpb.SourceInfo_Extension_Component + for _, c := range ext.Components { + comp, found := componentPBMap[c] + if found { + components = append(components, comp) + } + } + ver := &exprpb.SourceInfo_Extension_Version{ + Major: ext.Version.Major, + Minor: ext.Version.Minor, + } + pbExt := &exprpb.SourceInfo_Extension{ + Id: ext.ID, + Version: ver, + AffectedComponents: components, + } + sourceInfo.Extensions = append(sourceInfo.Extensions, pbExt) + } return sourceInfo, nil } @@ -556,6 +588,23 @@ func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) { } sourceInfo.SetMacroCall(id, call) } + for _, pbExt := range info.GetExtensions() { + var components []ExtensionComponent + for _, c := range pbExt.GetAffectedComponents() { + comp, found := pbComponentMap[*c.Enum()] + if found { + components = append(components, comp) + } + } + sourceInfo.AddExtension(NewExtension( + pbExt.GetId(), + NewExtensionVersion( + pbExt.GetVersion().GetMajor(), + pbExt.GetVersion().GetMinor(), + ), + components..., + )) + } return sourceInfo, nil } diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go index a4a51c3f2..cd4d3a565 100644 --- a/vendor/github.com/google/cel-go/common/decls/decls.go +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -270,7 +270,7 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { if oID == overload.ID() { if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { // Allow redefinition of an overload implementation so long as the signatures match. 
- if overload.hasBinding() { + if overload.HasBinding() { f.overloads[oID] = overload } // Allow redefinition of the doc string. @@ -303,6 +303,14 @@ func (f *FunctionDecl) OverloadDecls() []*OverloadDecl { return overloads } +// HasSingletonBinding indicates whether the function has a singleton binding definition. +func (f *FunctionDecl) HasSingletonBinding() bool { + if f == nil { + return false + } + return f.singleton != nil +} + // HasLateBinding returns true if the function has late bindings. A function cannot mix late bindings with other bindings. func (f *FunctionDecl) HasLateBinding() bool { if f == nil { @@ -328,7 +336,7 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { for _, oID := range f.overloadOrdinals { o := f.overloads[oID] hasLateBinding = hasLateBinding || o.HasLateBinding() - if o.hasBinding() { + if o.HasBinding() { overload := &functions.Overload{ Operator: o.ID(), Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards), @@ -740,8 +748,8 @@ func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool { return argsOverlap } -// hasBinding indicates whether the overload already has a definition. -func (o *OverloadDecl) hasBinding() bool { +// HasBinding indicates whether the overload already has a definition. +func (o *OverloadDecl) HasBinding() bool { return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil) } @@ -842,7 +850,7 @@ func OverloadExamples(examples ...string) OverloadOpt { // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. 
func UnaryBinding(binding functions.UnaryOp) OverloadOpt { return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { + if o.HasBinding() { return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) } if len(o.ArgTypes()) != 1 { @@ -860,7 +868,7 @@ func UnaryBinding(binding functions.UnaryOp) OverloadOpt { // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func BinaryBinding(binding functions.BinaryOp) OverloadOpt { return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { + if o.HasBinding() { return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) } if len(o.ArgTypes()) != 2 { @@ -878,7 +886,7 @@ func BinaryBinding(binding functions.BinaryOp) OverloadOpt { // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func FunctionBinding(binding functions.FunctionOp) OverloadOpt { return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { + if o.HasBinding() { return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) } if o.hasLateBinding { @@ -893,7 +901,7 @@ func FunctionBinding(binding functions.FunctionOp) OverloadOpt { // This is useful for functions which have side-effects or are not deterministically computable. func LateFunctionBinding() OverloadOpt { return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { + if o.HasBinding() { return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) } o.hasLateBinding = true diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go index 06eae3642..c10742c28 100644 --- a/vendor/github.com/google/cel-go/common/doc.go +++ b/vendor/github.com/google/cel-go/common/doc.go @@ -37,6 +37,8 @@ const ( DocMacro // DocExample represents example documentation. DocExample + // DocField represents documentation for a struct field. 
+ DocField ) // Doc holds the documentation details for a specific program element like @@ -163,6 +165,17 @@ func NewExampleDoc(ex string) *Doc { } } +// NewFieldDoc creates a new Doc struct for documenting a struct field. +func NewFieldDoc(name, celType, description string, examples ...*Doc) *Doc { + return &Doc{ + Kind: DocField, + Name: name, + Type: celType, + Description: description, + Children: examples, + } +} + // Documentor is an interface for types that can provide their own documentation. type Documentor interface { // Documentation returns the documentation coded by the DocKind to assist diff --git a/vendor/github.com/google/cel-go/common/env/env.go b/vendor/github.com/google/cel-go/common/env/env.go index e9c86d3ea..85ec85cd0 100644 --- a/vendor/github.com/google/cel-go/common/env/env.go +++ b/vendor/github.com/google/cel-go/common/env/env.go @@ -50,6 +50,7 @@ type Config struct { Functions []*Function `yaml:"functions,omitempty"` Validators []*Validator `yaml:"validators,omitempty"` Features []*Feature `yaml:"features,omitempty"` + Limits []*Limit `yaml:"limits,omitempty"` } // Validate validates the whole configuration is well-formed. @@ -92,6 +93,11 @@ func (c *Config) Validate() error { errs = append(errs, err) } } + for _, limit := range c.Limits { + if err := limit.Validate(); err != nil { + errs = append(errs, err) + } + } for _, val := range c.Validators { if err := val.Validate(); err != nil { errs = append(errs, err) @@ -206,6 +212,12 @@ func (c *Config) AddFeatures(feats ...*Feature) *Config { return c } +// AddLimits appends one or more limits to the config. +func (c *Config) AddLimits(limits ...*Limit) *Config { + c.Limits = append(c.Limits, limits...) + return c +} + // NewImport returns a serializable import value from the qualified type name. 
func NewImport(name string) *Import { return &Import{Name: name} @@ -734,6 +746,29 @@ func (feat *Feature) Validate() error { return nil } +// Limit represents a named limit in the CEL environment. This is used to control +// the complexity tolerated before failing parsing, type checking, or planning. +type Limit struct { + Name string `yaml:"name"` + Value int `yaml:"value"` +} + +// NewLimit creates a new limit. +func NewLimit(name string, value int) *Limit { + return &Limit{name, value} +} + +// Validate validates a limit. +func (l *Limit) Validate() error { + if l == nil { + return errors.New("invalid limit: nil") + } + if l.Name == "" { + return errors.New("invalid limit: missing name") + } + return nil +} + // NewTypeDesc describes a simple or complex type with parameters. func NewTypeDesc(typeName string, params ...*TypeDesc) *TypeDesc { return &TypeDesc{TypeName: typeName, Params: params} @@ -796,6 +831,14 @@ func (td *TypeDesc) Validate() error { return fmt.Errorf("invalid type: optional_type expects 1 parameter, got %d", len(td.Params)) } return td.Params[0].Validate() + case "type": + if len(td.Params) == 0 { + return nil + } + if len(td.Params) != 1 { + return fmt.Errorf("invalid type: type expects 0 or 1 parameters, got %d", len(td.Params)) + } + return td.Params[0].Validate() default: } return nil @@ -832,6 +875,15 @@ func (td *TypeDesc) AsCELType(tp types.Provider) (*types.Type, error) { return nil, err } return types.NewOptionalType(et), nil + case "type": + if len(td.Params) == 0 { + return types.TypeType, nil + } + pt, err := td.Params[0].AsCELType(tp) + if err != nil { + return nil, err + } + return types.NewTypeTypeWithParam(pt), nil default: if td.IsTypeParam { return types.NewTypeParamType(td.TypeName), nil diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go index 17ab1a95e..3216ff1c4 100644 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ 
b/vendor/github.com/google/cel-go/common/types/err.go @@ -113,6 +113,9 @@ func ValOrErr(val ref.Val, format string, args ...any) ref.Val { // WrapErr wraps an existing Go error value into a CEL Err value. func WrapErr(err error) ref.Val { + if err, ok := err.(*Err); ok { + return err + } return &Err{error: err} } diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index 324c0f969..028770ed6 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -126,16 +126,7 @@ func (l *baseList) Add(other ref.Val) ref.Val { if !ok { return MaybeNoSuchOverloadErr(other) } - if l.Size() == IntZero { - return other - } - if otherList.Size() == IntZero { - return l - } - return &concatList{ - Adapter: l.Adapter, - prevList: l, - nextList: otherList} + return newConcatList(l.Adapter, l, otherList) } // Contains implements the traits.Container interface method. @@ -353,9 +344,27 @@ func (l *mutableList) ToImmutableList() traits.Lister { // The `Adapter` enables native type to CEL type conversions. type concatList struct { Adapter - value any - prevList traits.Lister - nextList traits.Lister + value any + prevList traits.Lister + nextList traits.Lister + cachedSize ref.Val +} + +func newConcatList(adapter Adapter, prevList, nextList traits.Lister) ref.Val { + prevSize := prevList.Size().(Int) + nextSize := nextList.Size().(Int) + if prevSize == IntZero { + return nextList.(ref.Val) + } + if nextSize == IntZero { + return prevList.(ref.Val) + } + return &concatList{ + Adapter: adapter, + prevList: prevList, + nextList: nextList, + cachedSize: prevSize.Add(nextSize), + } } // Add implements the traits.Adder interface method. 
@@ -364,16 +373,7 @@ func (l *concatList) Add(other ref.Val) ref.Val { if !ok { return MaybeNoSuchOverloadErr(other) } - if l.Size() == IntZero { - return other - } - if otherList.Size() == IntZero { - return l - } - return &concatList{ - Adapter: l.Adapter, - prevList: l, - nextList: otherList} + return newConcatList(l.Adapter, l, otherList) } // Contains implements the traits.Container interface method. @@ -477,7 +477,7 @@ func (l *concatList) Iterator() traits.Iterator { // Size implements the traits.Sizer interface method. func (l *concatList) Size() ref.Val { - return l.prevList.Size().(Int).Add(l.nextList.Size()) + return l.cachedSize } // String converts the concatenated list to a human-readable string. diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go index c44eaa942..bb2a09e87 100644 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -187,8 +187,14 @@ func (o *protoObj) format(sb *strings.Builder) { if i > 0 { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%s: ", field.Name())) - formatTo(sb, o.Get(String(field.Name()))) + name := String(field.Name()) + if field.IsExtension() { + name = String(field.FullName()) + fmt.Fprintf(sb, "`%s`: ", name) + } else { + fmt.Fprintf(sb, "%s: ", name) + } + formatTo(sb, o.Get(name)) } sb.WriteString("}") } diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go index b8685ebf5..0d861823d 100644 --- a/vendor/github.com/google/cel-go/common/types/optional.go +++ b/vendor/github.com/google/cel-go/common/types/optional.go @@ -25,7 +25,7 @@ import ( var ( // OptionalType indicates the runtime type of an optional value. 
- OptionalType = NewOpaqueType("optional_type") + OptionalType = NewOpaqueType("optional_type", DynType) // OptionalNone is a sentinel value which is used to indicate an empty optional value. OptionalNone = &Optional{} @@ -59,6 +59,9 @@ func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) { if !o.HasValue() { return nil, errors.New("optional.none() dereference") } + if typeDesc == reflect.TypeFor[*Optional]() { + return o, nil + } return o.value.ConvertToNative(typeDesc) } diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go index e323afb1d..3a8bdf0b2 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/file.go +++ b/vendor/github.com/google/cel-go/common/types/pb/file.go @@ -32,7 +32,7 @@ func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDe } types := make(map[string]*TypeDescription) for name, msgType := range metadata.msgTypes { - types[name] = newTypeDescription(name, msgType, pbdb.extensions) + types[name] = newTypeDescription(name, msgType, pbdb) } fileExtMap := make(extensionMap) for typeName, extensions := range metadata.msgExtensionMap { @@ -42,12 +42,13 @@ func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDe } for _, ext := range extensions { extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor() - messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc) + messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc, pbdb.jsonFieldNames) } fileExtMap[typeName] = messageExtMap } return &FileDescription{ name: fileDesc.Path(), + desc: fileDesc, types: types, enums: enums, }, fileExtMap @@ -56,6 +57,7 @@ func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDe // FileDescription holds a map of all types and enum values declared within a proto file. 
type FileDescription struct { name string + desc protoreflect.FileDescriptor types map[string]*TypeDescription enums map[string]*EnumValueDescription } @@ -68,6 +70,7 @@ func (fd *FileDescription) Copy(pbdb *Db) *FileDescription { } return &FileDescription{ name: fd.name, + desc: fd.desc, types: typesCopy, enums: fd.enums, } @@ -78,6 +81,11 @@ func (fd *FileDescription) GetName() string { return fd.name } +// FileDescriptor returns the proto file descriptor associated with the file representation. +func (fd *FileDescription) FileDescriptor() protoreflect.FileDescriptor { + return fd.desc +} + // GetEnumDescription returns an EnumDescription for a qualified enum value // name declared within the .proto file. func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) { diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go index eadebcb04..c6fdfc695 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/pb.go +++ b/vendor/github.com/google/cel-go/common/types/pb/pb.go @@ -42,6 +42,9 @@ type Db struct { files []*FileDescription // extensions contains the mapping between a given type name, extension name and its FieldDescription extensions map[string]map[string]*FieldDescription + + // jsonFieldNames indicates whether json-style names are supported as proto field names. + jsonFieldNames bool } // extensionsMap is a type alias to a map[typeName]map[extensionName]*FieldDescription @@ -81,13 +84,27 @@ func Merge(dstPB, srcPB proto.Message) error { return nil } +// DbOption modifies feature flags enabled on the proto database. +type DbOption func(*Db) *Db + +// JSONFieldNames configures the Db to support proto field accesses by their JSON names. +func JSONFieldNames(enabled bool) DbOption { + return func(db *Db) *Db { + db.jsonFieldNames = enabled + return db + } +} + // NewDb creates a new `pb.Db` with an empty type name to file description map. 
-func NewDb() *Db { +func NewDb(opts ...DbOption) *Db { pbdb := &Db{ revFileDescriptorMap: make(map[string]*FileDescription), files: []*FileDescription{}, extensions: make(extensionMap), } + for _, o := range opts { + pbdb = o(pbdb) + } // The FileDescription objects in the default db contain lazily initialized TypeDescription // values which may point to the state contained in the DefaultDb irrespective of this shallow // copy; however, the type graph for a field is idempotently computed, and is guaranteed to @@ -100,9 +117,15 @@ func NewDb() *Db { return pbdb } +// JSONFieldNames indicates whether the database is configured for proto field accesses by JSON names. +func (pbdb *Db) JSONFieldNames() bool { + return pbdb.jsonFieldNames +} + // Copy creates a copy of the current database with its own internal descriptor mapping. func (pbdb *Db) Copy() *Db { copy := NewDb() + copy.jsonFieldNames = pbdb.jsonFieldNames for _, fd := range pbdb.files { hasFile := false for _, fd2 := range copy.files { diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index 171494f07..8d7d1b299 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -40,68 +40,92 @@ type description interface { // newTypeDescription produces a TypeDescription value for the fully-qualified proto type name // with a given descriptor. 
-func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription { +func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, pbdb *Db) *TypeDescription { msgType := dynamicpb.NewMessageType(desc) msgZero := dynamicpb.NewMessage(desc) fieldMap := map[string]*FieldDescription{} + jsonFieldMap := map[string]*FieldDescription{} fields := desc.Fields() for i := 0; i < fields.Len(); i++ { f := fields.Get(i) - fieldMap[string(f.Name())] = newFieldDescription(f) + fd := newFieldDescription(f, pbdb.jsonFieldNames) + fieldMap[fd.Name()] = fd + if pbdb.jsonFieldNames { + jsonFieldMap[fd.JSONName()] = fd + } } return &TypeDescription{ - typeName: typeName, - desc: desc, - msgType: msgType, - fieldMap: fieldMap, - extensions: extensions, - reflectType: reflectTypeOf(msgZero), - zeroMsg: zeroValueOf(msgZero), + typeName: typeName, + desc: desc, + msgType: msgType, + fieldMap: fieldMap, + jsonFieldMap: jsonFieldMap, + extensions: pbdb.extensions, + reflectType: reflectTypeOf(msgZero), + zeroMsg: zeroValueOf(msgZero), + jsonFieldNames: pbdb.jsonFieldNames, } } // TypeDescription is a collection of type metadata relevant to expression // checking and evaluation. type TypeDescription struct { - typeName string - desc protoreflect.MessageDescriptor - msgType protoreflect.MessageType - fieldMap map[string]*FieldDescription - extensions extensionMap - reflectType reflect.Type - zeroMsg proto.Message + typeName string + desc protoreflect.MessageDescriptor + msgType protoreflect.MessageType + fieldMap map[string]*FieldDescription + jsonFieldMap map[string]*FieldDescription + extensions extensionMap + reflectType reflect.Type + zeroMsg proto.Message + // jsonFieldNames indicates if the type's fields are accessible via their JSON names. + jsonFieldNames bool } // Copy copies the type description with updated references to the Db. 
func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription { return &TypeDescription{ - typeName: td.typeName, - desc: td.desc, - msgType: td.msgType, - fieldMap: td.fieldMap, - extensions: pbdb.extensions, - reflectType: td.reflectType, - zeroMsg: td.zeroMsg, + typeName: td.typeName, + desc: td.desc, + msgType: td.msgType, + fieldMap: td.fieldMap, + jsonFieldMap: td.jsonFieldMap, + extensions: pbdb.extensions, + reflectType: td.reflectType, + zeroMsg: td.zeroMsg, + jsonFieldNames: td.jsonFieldNames, } } // FieldMap returns a string field name to FieldDescription map. func (td *TypeDescription) FieldMap() map[string]*FieldDescription { + if td.jsonFieldNames { + return td.jsonFieldMap + } return td.fieldMap } // FieldByName returns (FieldDescription, true) if the field name is declared within the type. func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) { + if td.jsonFieldNames { + fd, found := td.jsonFieldMap[name] + if found { + return fd, true + } + } + fd, found := td.fieldMap[name] if found { return fd, true } + extFieldMap, found := td.extensions[td.typeName] - if !found { - return nil, false + if found { + fd, found = extFieldMap[name] + return fd, found } - fd, found = extFieldMap[name] - return fd, found + + return nil, false } // MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible. @@ -132,7 +156,7 @@ func (td *TypeDescription) Zero() proto.Message { } // newFieldDescription creates a new field description from a protoreflect.FieldDescriptor. 
-func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription { +func newFieldDescription(fieldDesc protoreflect.FieldDescriptor, jsonFieldNames bool) *FieldDescription { var reflectType reflect.Type var zeroMsg proto.Message switch fieldDesc.Kind() { @@ -168,15 +192,16 @@ func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescripti } var keyType, valType *FieldDescription if fieldDesc.IsMap() { - keyType = newFieldDescription(fieldDesc.MapKey()) - valType = newFieldDescription(fieldDesc.MapValue()) + keyType = newFieldDescription(fieldDesc.MapKey(), jsonFieldNames) + valType = newFieldDescription(fieldDesc.MapValue(), jsonFieldNames) } return &FieldDescription{ - desc: fieldDesc, - KeyType: keyType, - ValueType: valType, - reflectType: reflectType, - zeroMsg: zeroValueOf(zeroMsg), + desc: fieldDesc, + KeyType: keyType, + ValueType: valType, + reflectType: reflectType, + zeroMsg: zeroValueOf(zeroMsg), + jsonFieldName: jsonFieldNames, } } @@ -187,9 +212,10 @@ type FieldDescription struct { // ValueType holds the value FieldDescription for map fields. ValueType *FieldDescription - desc protoreflect.FieldDescriptor - reflectType reflect.Type - zeroMsg proto.Message + desc protoreflect.FieldDescriptor + reflectType reflect.Type + zeroMsg proto.Message + jsonFieldName bool } // CheckedType returns the type-definition used at type-check time. @@ -218,6 +244,14 @@ func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor { return fd.desc } +// Documentation returns the documentation for the field. +func (fd *FieldDescription) Documentation() string { + if parentFile := fd.desc.ParentFile(); parentFile != nil { + return parentFile.SourceLocations().ByDescriptor(fd.desc).LeadingComments + } + return "" +} + // IsSet returns whether the field is set on the target value, per the proto presence conventions // of proto2 or proto3 accordingly. 
// @@ -321,11 +355,20 @@ func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, b return unwrapDynamic(fd, msg) } -// Name returns the CamelCase name of the field within the proto-based struct. +// Name returns the snake_case name of the field within the proto-based struct. func (fd *FieldDescription) Name() string { return string(fd.desc.Name()) } +// JSONName returns the JSON name of the field, if present. +func (fd *FieldDescription) JSONName() string { + jsonName := fd.desc.JSONName() + if len(jsonName) != 0 { + return jsonName + } + return string(fd.desc.Name()) +} + // ProtoKind returns the protobuf reflected kind of the field. func (fd *FieldDescription) ProtoKind() protoreflect.Kind { return fd.desc.Kind() diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index 936a4e28b..1bb2c11ed 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -81,6 +81,9 @@ type FieldType struct { // GetFrom retrieves the field value on the input object, if set. GetFrom ref.FieldGetter + + // IsJSONField + IsJSONField bool } // Registry provides type information for a set of registered types. @@ -93,11 +96,40 @@ type Registry struct { // provider which can create new instances of the provided message or any // message that proto depends upon in its FileDescriptor. func NewRegistry(types ...proto.Message) (*Registry, error) { - p := &Registry{ + return NewProtoRegistry(ProtoTypeDefs(types...)) +} + +// RegistryOption configures the behavior of the registry. +type RegistryOption func(r *Registry) (*Registry, error) + +// JSONFieldNames configures JSON field name support within the protobuf types in the registry. 
+func JSONFieldNames(enabled bool) RegistryOption { + return func(r *Registry) (*Registry, error) { + err := r.WithJSONFieldNames(enabled) + return r, err + } +} + +// ProtoTypeDefs creates a RegistryOption which registers the individual proto messages with the registry. +func ProtoTypeDefs(types ...proto.Message) RegistryOption { + return func(r *Registry) (*Registry, error) { + for _, msgType := range types { + err := r.RegisterMessage(msgType) + if err != nil { + return nil, err + } + } + return r, nil + } +} + +// NewProtoRegistry creates a proto-based registry with a set of configurable options. +func NewProtoRegistry(opts ...RegistryOption) (*Registry, error) { + r := &Registry{ revTypeMap: make(map[string]*Type), pbdb: pb.NewDb(), } - err := p.RegisterType( + err := r.RegisterType( BoolType, BytesType, DoubleType, @@ -114,19 +146,19 @@ func NewRegistry(types ...proto.Message) (*Registry, error) { return nil, err } // This block ensures that the well-known protobuf types are registered by default. - for _, fd := range p.pbdb.FileDescriptions() { - err = p.registerAllTypes(fd) + for _, fd := range r.pbdb.FileDescriptions() { + err = r.registerAllTypes(fd) if err != nil { return nil, err } } - for _, msgType := range types { - err = p.RegisterMessage(msgType) + for _, opt := range opts { + r, err = opt(r) if err != nil { return nil, err } } - return p, nil + return r, nil } // NewEmptyRegistry returns a registry which is completely unconfigured. @@ -149,6 +181,28 @@ func (p *Registry) Copy() *Registry { return copy } +// JSONFieldNames returns whether json field names are enabled in this registry. +func (p *Registry) JSONFieldNames() bool { + return p.pbdb.JSONFieldNames() +} + +// WithJSONFieldNames configures the registry with the JSON field name support enabled or disabled. 
+func (p *Registry) WithJSONFieldNames(enabled bool) error { + if enabled == p.pbdb.JSONFieldNames() { + return nil + } + newDB := pb.NewDb(pb.JSONFieldNames(enabled)) + files := p.pbdb.FileDescriptions() + for _, fd := range files { + _, err := newDB.RegisterDescriptor(fd.FileDescriptor()) + if err != nil { + return err + } + } + p.pbdb = newDB + return nil +} + // EnumValue returns the numeric value of the given enum value name. func (p *Registry) EnumValue(enumName string) ref.Val { enumVal, found := p.pbdb.DescribeEnum(enumName) @@ -172,9 +226,11 @@ func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, return nil, false } return &ref.FieldType{ - Type: field.CheckedType(), - IsSet: field.IsSet, - GetFrom: field.GetFrom}, true + Type: field.CheckedType(), + IsSet: field.IsSet, + GetFrom: field.GetFrom, + IsJSONField: p.pbdb.JSONFieldNames() && fieldName == field.JSONName(), + }, true } // FindStructFieldNames returns the set of field names for the given struct type, @@ -206,9 +262,25 @@ func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType return nil, false } return &FieldType{ - Type: fieldDescToCELType(field), - IsSet: field.IsSet, - GetFrom: field.GetFrom}, true + Type: fieldDescToCELType(field), + IsSet: field.IsSet, + GetFrom: field.GetFrom, + IsJSONField: p.pbdb.JSONFieldNames() && fieldName == field.JSONName(), + }, true +} + +// FindStructFieldDescription returns documentation for a field if available. +// Returns false if the field could not be found. +func (p *Registry) FindStructFieldDescription(structType, fieldName string) (string, bool) { + msgType, found := p.pbdb.DescribeType(structType) + if !found { + return "", false + } + field, found := msgType.FieldByName(fieldName) + if !found { + return "", false + } + return field.Documentation(), true } // FindIdent takes a qualified identifier name and returns a ref.Val if one exists. 
@@ -268,9 +340,8 @@ func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Va return NewErr("unknown type '%s'", structType) } msg := td.New() - fieldMap := td.FieldMap() for name, value := range fields { - field, found := fieldMap[name] + field, found := td.FieldByName(name) if !found { return NewErr("no such field: %s", name) } diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go index b9820023d..ed5ab0662 100644 --- a/vendor/github.com/google/cel-go/common/types/ref/provider.go +++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go @@ -93,6 +93,9 @@ type FieldType struct { // GetFrom retrieves the field value on the input object, if set. GetFrom FieldGetter + + // IsJSONFIeld indicates that the field was accessed via its JSON name. + IsJSONField bool } // FieldTester is used to test field presence on an input object. diff --git a/vendor/github.com/google/cel-go/ext/formatting.go b/vendor/github.com/google/cel-go/ext/formatting.go index 111184b73..35fb17048 100644 --- a/vendor/github.com/google/cel-go/ext/formatting.go +++ b/vendor/github.com/google/cel-go/ext/formatting.go @@ -410,7 +410,9 @@ func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { // stringFormatValidator implements the cel.ASTValidator interface allowing for static validation // of string.format calls. -type stringFormatValidator struct{} +type stringFormatValidator struct { + maxPrecision int +} // Name returns the name of the validator. func (stringFormatValidator) Name() string { @@ -427,7 +429,7 @@ func (stringFormatValidator) Configure(config cel.MutableValidatorConfig) error // Validate parses all literal format strings and type checks the format clause against the argument // at the corresponding ordinal within the list literal argument to the function, if one is specified. 
-func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { +func (v stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { root := ast.NavigateAST(a) formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a)) for _, e := range formatCallExprs { @@ -439,7 +441,7 @@ func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *as ast: a, } // use a placeholder locale, since locale doesn't affect syntax - _, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US") + _, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US", v.maxPrecision) if err != nil { iss.ReportErrorAtID(getErrorExprID(e.ID(), err), "%v", err) continue @@ -778,7 +780,7 @@ type formatListArgs interface { // parseFormatString formats a string according to the string.format syntax, taking the clause implementations // from the provided FormatCallback and the args from the given FormatList. 
-func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string) (string, error) { +func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string, maxPrecision int) (string, error) { i := 0 argIndex := 0 var builtStr strings.Builder @@ -802,7 +804,7 @@ func parseFormatString(formatStr string, callback formatStringInterpolator, list if int64(argIndex) >= list.Size() { return "", fmt.Errorf("index %d out of range", argIndex) } - numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) + numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale, maxPrecision) if refErr != nil { return "", refErr } @@ -826,9 +828,9 @@ func parseFormatString(formatStr string, callback formatStringInterpolator, list // parseAndFormatClause parses the format clause at the start of the given string with val, and returns // how many characters were consumed and the substituted string form of val, or an error if one occurred. 
-func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string) (int, string, error) { +func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string, maxPrecision int) (int, string, error) { i := 1 - read, formatter, err := parseFormattingClause(formatStr[i:], callback) + read, formatter, err := parseFormattingClause(formatStr[i:], callback, maxPrecision) i += read if err != nil { return -1, "", newParseFormatError("could not parse formatting clause", err) @@ -841,9 +843,9 @@ func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringIn return i, valStr, nil } -func parseFormattingClause(formatStr string, callback formatStringInterpolator) (int, clauseImpl, error) { +func parseFormattingClause(formatStr string, callback formatStringInterpolator, maxPrecision int) (int, clauseImpl, error) { i := 0 - read, precision, err := parsePrecision(formatStr[i:]) + read, precision, err := parsePrecision(formatStr[i:], maxPrecision) i += read if err != nil { return -1, nil, fmt.Errorf("error while parsing precision: %w", err) @@ -870,7 +872,7 @@ func parseFormattingClause(formatStr string, callback formatStringInterpolator) } } -func parsePrecision(formatStr string) (int, *int, error) { +func parsePrecision(formatStr string, maxPrecision int) (int, *int, error) { i := 0 if formatStr[i] != '.' 
{ return i, nil, nil @@ -891,6 +893,9 @@ func parsePrecision(formatStr string) (int, *int, error) { if err != nil { return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) } + if maxPrecision > 0 && precision > maxPrecision { + return -1, nil, fmt.Errorf("precision %d exceeds maximum allowed precision %d", precision, maxPrecision) + } return i, &precision, nil } diff --git a/vendor/github.com/google/cel-go/ext/formatting_v2.go b/vendor/github.com/google/cel-go/ext/formatting_v2.go index 6ac55b5d9..f923cc7e1 100644 --- a/vendor/github.com/google/cel-go/ext/formatting_v2.go +++ b/vendor/github.com/google/cel-go/ext/formatting_v2.go @@ -402,7 +402,9 @@ func (c *stringFormatterV2) Octal(arg ref.Val) (string, error) { // stringFormatValidatorV2 implements the cel.ASTValidator interface allowing for static validation // of string.format calls. -type stringFormatValidatorV2 struct{} +type stringFormatValidatorV2 struct { + maxPrecision int +} // Name returns the name of the validator. func (stringFormatValidatorV2) Name() string { @@ -419,7 +421,7 @@ func (stringFormatValidatorV2) Configure(config cel.MutableValidatorConfig) erro // Validate parses all literal format strings and type checks the format clause against the argument // at the corresponding ordinal within the list literal argument to the function, if one is specified. 
-func (stringFormatValidatorV2) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { +func (v stringFormatValidatorV2) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { root := ast.NavigateAST(a) formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a)) for _, e := range formatCallExprs { @@ -431,7 +433,7 @@ func (stringFormatValidatorV2) Validate(env *cel.Env, _ cel.ValidatorConfig, a * ast: a, } // use a placeholder locale, since locale doesn't affect syntax - _, err := parseFormatStringV2(formatStr, formatCheck, formatCheck) + _, err := parseFormatStringV2(formatStr, formatCheck, formatCheck, v.maxPrecision) if err != nil { iss.ReportErrorAtID(getErrorExprID(e.ID(), err), "%v", err) continue @@ -668,7 +670,7 @@ type formatStringInterpolatorV2 interface { // parseFormatString formats a string according to the string.format syntax, taking the clause implementations // from the provided FormatCallback and the args from the given FormatList. 
-func parseFormatStringV2(formatStr string, callback formatStringInterpolatorV2, list formatListArgs) (string, error) { +func parseFormatStringV2(formatStr string, callback formatStringInterpolatorV2, list formatListArgs, maxPrecision int) (string, error) { i := 0 argIndex := 0 var builtStr strings.Builder @@ -692,7 +694,7 @@ func parseFormatStringV2(formatStr string, callback formatStringInterpolatorV2, if int64(argIndex) >= list.Size() { return "", fmt.Errorf("index %d out of range", argIndex) } - numRead, val, refErr := parseAndFormatClauseV2(formatStr[i:], argAny, callback, list) + numRead, val, refErr := parseAndFormatClauseV2(formatStr[i:], argAny, callback, list, maxPrecision) if refErr != nil { return "", refErr } @@ -716,9 +718,9 @@ func parseFormatStringV2(formatStr string, callback formatStringInterpolatorV2, // parseAndFormatClause parses the format clause at the start of the given string with val, and returns // how many characters were consumed and the substituted string form of val, or an error if one occurred. 
-func parseAndFormatClauseV2(formatStr string, val ref.Val, callback formatStringInterpolatorV2, list formatListArgs) (int, string, error) { +func parseAndFormatClauseV2(formatStr string, val ref.Val, callback formatStringInterpolatorV2, list formatListArgs, maxPrecision int) (int, string, error) { i := 1 - read, formatter, err := parseFormattingClauseV2(formatStr[i:], callback) + read, formatter, err := parseFormattingClauseV2(formatStr[i:], callback, maxPrecision) i += read if err != nil { return -1, "", newParseFormatError("could not parse formatting clause", err) @@ -731,9 +733,9 @@ func parseAndFormatClauseV2(formatStr string, val ref.Val, callback formatString return i, valStr, nil } -func parseFormattingClauseV2(formatStr string, callback formatStringInterpolatorV2) (int, clauseImplV2, error) { +func parseFormattingClauseV2(formatStr string, callback formatStringInterpolatorV2, maxPrecision int) (int, clauseImplV2, error) { i := 0 - read, precision, err := parsePrecisionV2(formatStr[i:]) + read, precision, err := parsePrecisionV2(formatStr[i:], maxPrecision) i += read if err != nil { return -1, nil, fmt.Errorf("error while parsing precision: %w", err) @@ -760,7 +762,7 @@ func parseFormattingClauseV2(formatStr string, callback formatStringInterpolator } } -func parsePrecisionV2(formatStr string) (int, int, error) { +func parsePrecisionV2(formatStr string, maxPrecision int) (int, int, error) { i := 0 if formatStr[i] != '.' 
{ return i, defaultPrecision, nil @@ -784,5 +786,8 @@ func parsePrecisionV2(formatStr string) (int, int, error) { if precision < 0 { return -1, -1, fmt.Errorf("negative precision: %d", precision) } + if maxPrecision > 0 && precision > maxPrecision { + return -1, -1, fmt.Errorf("precision %d exceeds maximum allowed precision %d", precision, maxPrecision) + } return i, precision, nil } diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index 315567745..c30f26ad3 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -434,10 +434,18 @@ func convertToCelType(refType reflect.Type) (*cel.Type, bool) { if refType == timestampType { return cel.TimestampType, true } + if refType.Implements(refValType) { + emptyCelVal := reflect.New(refType).Elem().Interface().(ref.Val) + return emptyCelVal.Type().(*cel.Type), true + } return cel.ObjectType( fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), ), true case reflect.Pointer: + if refType.Implements(refValType) { + emptyCelVal := reflect.New(refType.Elem()).Interface().(ref.Val) + return emptyCelVal.Type().(*cel.Type), true + } if refType.Implements(pbMsgInterfaceType) { pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage) return cel.ObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true @@ -608,6 +616,10 @@ func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflec alreadySeen := make(map[string]struct{}) var iterateStructMembers func(reflect.Type) iterateStructMembers = func(t reflect.Type) { + if t.Implements(reflect.TypeFor[ref.Val]()) { + // skip this field since it's a CEL ref.Val instance. 
+ return + } if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == reflect.Map { iterateStructMembers(t.Elem()) return @@ -791,7 +803,8 @@ func isSupportedType(refType reflect.Type) bool { } var ( - pbMsgInterfaceType = reflect.TypeOf((*protoreflect.ProtoMessage)(nil)).Elem() - timestampType = reflect.TypeOf(time.Now()) - durationType = reflect.TypeOf(time.Nanosecond) + pbMsgInterfaceType = reflect.TypeFor[protoreflect.ProtoMessage]() + refValType = reflect.TypeFor[ref.Val]() + timestampType = reflect.TypeFor[time.Time]() + durationType = reflect.TypeFor[time.Duration]() ) diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go index de65421f6..66b7806a3 100644 --- a/vendor/github.com/google/cel-go/ext/strings.go +++ b/vendor/github.com/google/cel-go/ext/strings.go @@ -303,8 +303,9 @@ func Strings(options ...StringsOption) cel.EnvOption { } type stringLib struct { - locale string - version uint32 + locale string + version uint32 + maxPrecision int } // LibraryName implements the SingletonLibrary interface method. @@ -353,6 +354,16 @@ func StringsValidateFormatCalls(value bool) StringsOption { } } +// StringsMaxPrecision configures the maximum precision for floating-point format clauses. +// +// If not set, the default is 100 for version >= 5, and no limit for earlier versions. +func StringsMaxPrecision(limit int) StringsOption { + return func(lib *stringLib) *stringLib { + lib.maxPrecision = limit + return lib + } +} + // CompileOptions implements the Library interface method. func (lib *stringLib) CompileOptions() []cel.EnvOption { formatLocale := "en_US" @@ -470,6 +481,13 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { return stringOrError(upperASCII(string(s))) }))), } + // maxPrecision is unbounded (0) for versions < 5 to maintain backward + // compatibility. For version >= 5, the default is 100 if not explicitly + // configured via StringsMaxPrecision(). 
+ maxPrecision := lib.maxPrecision + if maxPrecision == 0 && lib.version >= 5 { + maxPrecision = 100 + } if lib.version >= 1 { if lib.version >= 4 { opts = append(opts, cel.Function("format", @@ -477,7 +495,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.FunctionBinding(func(args ...ref.Val) ref.Val { s := string(args[0].(types.String)) formatArgs := args[1].(traits.Lister) - return stringOrError(parseFormatStringV2(s, &stringFormatterV2{}, &stringArgList{formatArgs})) + return stringOrError(parseFormatStringV2(s, &stringFormatterV2{}, &stringArgList{formatArgs}, maxPrecision)) })))) } else { opts = append(opts, cel.Function("format", @@ -485,7 +503,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.FunctionBinding(func(args ...ref.Val) ref.Val { s := string(args[0].(types.String)) formatArgs := args[1].(traits.Lister) - return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) + return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale, maxPrecision)) })))) } opts = append(opts, @@ -544,9 +562,9 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { } if lib.version >= 1 { if lib.version >= 4 { - opts = append(opts, cel.ASTValidators(stringFormatValidatorV2{})) + opts = append(opts, cel.ASTValidators(stringFormatValidatorV2{maxPrecision: maxPrecision})) } else { - opts = append(opts, cel.ASTValidators(stringFormatValidator{})) + opts = append(opts, cel.ASTValidators(stringFormatValidator{maxPrecision: maxPrecision})) } } return opts diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index 053cb6851..6b8b5c1b6 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -349,7 +349,7 @@ func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { obj, found := 
v.ResolveName(nm) if found { if celErr, ok := obj.(*types.Err); ok { - return nil, celErr.Unwrap() + return nil, celErr } obj, isOpt, err := applyQualifiers(v, obj, a.qualifiers) if err != nil { diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index 9c8575db5..50e66d637 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -1433,7 +1433,7 @@ func (f *folder) AsPartialActivation() (PartialActivation, bool) { func (f *folder) evalResult() ref.Val { f.computeResult = true if f.interrupted { - return types.NewErr("operation interrupted") + return types.WrapErr(InterruptError{}) } res := f.result.Eval(f) // Convert a mutable list or map to an immutable one if the comprehension has generated a list or @@ -1468,6 +1468,20 @@ func checkInterrupt(a Activation) bool { return found && stop == true } +// InterruptError is a specialized error type used to signal that program evaluation should check +// whether a context cancellation is responsible for the error. +type InterruptError struct{} + +// Error returns operation interrupted. +func (InterruptError) Error() string { + return "operation interrupted" +} + +// Is returns whether two errors are interrupt errors. +func (ie InterruptError) Is(target error) bool { + return target.Error() == ie.Error() +} + var ( // pool of var folders to reduce allocations during folds. 
folderPool = &sync.Pool{ diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go index b5ec73ec6..d1567b5ff 100644 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ b/vendor/github.com/google/cel-go/parser/parser.go @@ -42,6 +42,7 @@ type Parser struct { func NewParser(opts ...Option) (*Parser, error) { p := &Parser{} p.enableHiddenAccumulatorName = true + p.enableIdentEscapeSyntax = true for _, opt := range opts { if err := opt(&p.options); err != nil { return nil, err diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4528059ca..804a20181 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -31,6 +31,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2d" binary: s2d @@ -57,6 +60,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2sx" binary: s2sx @@ -84,6 +90,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm archives: - @@ -91,7 +100,7 @@ archives: name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows - format: zip + formats: ['zip'] files: - unpack/* - s2/LICENSE diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 5125c1f26..e839fe9c6 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -26,6 +26,12 @@ This package will support the current Go version and 2 versions back. Use the links above for more information on each. 
# changelog + +* Feb 9th, 2026 [1.18.4](https://github.com/klauspost/compress/releases/tag/v1.18.4) + * gzhttp: Add zstandard to server handler wrapper https://github.com/klauspost/compress/pull/1121 + * zstd: Add ResetWithOptions to encoder/decoder https://github.com/klauspost/compress/pull/1122 + * gzhttp: preserve qvalue when extra parameters follow in Accept-Encoding by @analytically in https://github.com/klauspost/compress/pull/1116 + * Jan 16th, 2026 [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3) * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102). @@ -691,3 +697,4 @@ This code is licensed under the same conditions as the original Go code. See LIC + diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index 99ddd4af9..2d6ef64be 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // This file contains the specialisation of Decoder.Decompress4X // and Decoder.Decompress1X that use an asm implementation of thir main loops. diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go index 908c17de6..610392322 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // This file contains a generic implementation of Decoder.Decompress4X. 
package huff0 diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go index e802579c4..b97f9056f 100644 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package cpuinfo diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index fd35ea148..0e33aea44 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -78,6 +78,7 @@ func (b *blockEnc) initNewEncode() { b.recentOffsets = [3]uint32{1, 4, 8} b.litEnc.Reuse = huff0.ReusePolicyNone b.coders.setPrev(nil, nil, nil) + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index c1192ec38..c4de134a7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -21,7 +21,7 @@ type fastBase struct { crc *xxhash.Digest tmp [8]byte blk *blockEnc - lastDictID uint32 + lastDict *dict lowMem bool } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c1581cfcb..851799322 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -479,10 +479,13 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if 
len(e.dictTable) != len(e.table) { e.dictTable = make([]prevEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -510,13 +513,14 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -538,8 +542,8 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id } + e.lastDict = d // Reset table to initial state copy(e.longTable[:], e.dictLongTable) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 85dcd28c3..3305f0924 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -1102,10 +1102,13 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -1133,14 +1136,15 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id e.allDirty = true } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table 
+ if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1162,9 +1166,9 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id e.allDirty = true } + e.lastDict = d // Reset table to initial state { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index cf8cad00d..2fb6da112 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1040,15 +1040,18 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { allDirty := e.allDirty + dictChanged := d != e.lastDict e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]tableEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1065,7 +1068,6 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id allDirty = true } // Reset table to initial state diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 9180a3a58..5e104f1a4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -805,9 +805,11 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } // Init or copy dict table - if len(e.dictTable) != 
len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || d != e.lastDict { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 @@ -827,7 +829,7 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id + e.lastDict = d e.allDirty = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 19e730acc..0f2a00a00 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -138,11 +138,18 @@ func (e *Encoder) Reset(w io.Writer) { func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error { e.o.resetOpt = true defer func() { e.o.resetOpt = false }() + hadDict := e.o.dict != nil for _, o := range opts { if err := o(&e.o); err != nil { return err } } + hasDict := e.o.dict != nil + if hadDict != hasDict { + // Dict presence changed — encoder type must be recreated. 
+ e.state.encoder = nil + e.init = sync.Once{} + } e.Reset(w) return nil } @@ -448,6 +455,12 @@ func (e *Encoder) Close() error { if s.encoder == nil { return nil } + if s.w == nil { + if len(s.filling) == 0 && !s.headerWritten && !s.eofWritten && s.nInput == 0 { + return nil + } + return errors.New("zstd: encoder has no writer") + } err := e.nextBlock(true) if err != nil { if errors.Is(s.err, ErrEncoderClosed) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 8e0f5cac7..e217be0a1 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -42,6 +42,7 @@ func (o *encoderOptions) setDefault() { level: SpeedDefault, allLitEntropy: false, lowMem: false, + fullZero: true, } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go index d04a829b0..b8c8607b5 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 8adfebb02..2138f8091 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 0be16cefc..9576426e6 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go 
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,4 @@ //go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go index f41932b7a..1ed18927f 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index bea1779e9..379746c96 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 1f8c3cec2..18c3703dd 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 7cec2197c..516cd9b07 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/panjf2000/ants/v2/README.md b/vendor/github.com/panjf2000/ants/v2/README.md index 3baa602db..b332f804b 100644 --- a/vendor/github.com/panjf2000/ants/v2/README.md +++ b/vendor/github.com/panjf2000/ants/v2/README.md @@ -7,7 +7,7 @@
- + @@ -215,7 +215,7 @@ Trusted by the following corporations/organizations. - + diff --git a/vendor/github.com/panjf2000/ants/v2/README_ZH.md b/vendor/github.com/panjf2000/ants/v2/README_ZH.md index 69e8736d1..ea30a8f4c 100644 --- a/vendor/github.com/panjf2000/ants/v2/README_ZH.md +++ b/vendor/github.com/panjf2000/ants/v2/README_ZH.md @@ -7,7 +7,7 @@
- + @@ -216,7 +216,7 @@ pool.Reboot() - + diff --git a/vendor/github.com/panjf2000/ants/v2/ants.go b/vendor/github.com/panjf2000/ants/v2/ants.go index 0dfce8a1a..8a1cfb7b0 100644 --- a/vendor/github.com/panjf2000/ants/v2/ants.go +++ b/vendor/github.com/panjf2000/ants/v2/ants.go @@ -139,6 +139,14 @@ func ReleaseTimeout(timeout time.Duration) error { return defaultAntsPool.ReleaseTimeout(timeout) } +// ReleaseContext is like Release but with a context, it waits all workers to exit before the context is done. +// +// Note that if the context is nil, it is the same as Release, +// just return immediately without waiting for all workers to exit. +func ReleaseContext(ctx context.Context) error { + return defaultAntsPool.ReleaseContext(ctx) +} + // Reboot reboots the default pool. func Reboot() { defaultAntsPool.Reboot() @@ -323,8 +331,8 @@ func (p *poolCommon) goTicktock() { go p.ticktock() } -func (p *poolCommon) nowTime() time.Time { - return time.Unix(0, atomic.LoadInt64(&p.now)) +func (p *poolCommon) nowTime() int64 { + return atomic.LoadInt64(&p.now) } // Running returns the number of workers currently running. @@ -398,12 +406,31 @@ func (p *poolCommon) Release() { // ReleaseTimeout is like Release but with a timeout, it waits all workers to exit before timing out. func (p *poolCommon) ReleaseTimeout(timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := p.ReleaseContext(ctx) + if errors.Is(err, context.DeadlineExceeded) { + return ErrTimeout + } + return err +} + +// ReleaseContext is like Release but with a context, it waits all workers to exit before the context is done. +// +// Note that if the context is nil, it is the same as Release, +// just return immediately without waiting for all workers to exit. 
+func (p *poolCommon) ReleaseContext(ctx context.Context) error { if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil { return ErrPoolClosed } p.Release() + // Don't wait for all workers to exit, just return immediately if the context is nil. + if ctx == nil { + return nil + } + var purgeCh <-chan struct{} if !p.options.DisablePurge { purgeCh = p.purgeCtx.Done() @@ -417,12 +444,10 @@ func (p *poolCommon) ReleaseTimeout(timeout time.Duration) error { }) } - timer := time.NewTimer(timeout) - defer timer.Stop() for { select { - case <-timer.C: - return ErrTimeout + case <-ctx.Done(): + return ctx.Err() case <-p.allDone: <-purgeCh <-p.ticktockCtx.Done() diff --git a/vendor/github.com/panjf2000/ants/v2/multipool.go b/vendor/github.com/panjf2000/ants/v2/multipool.go index 342b03830..eac5916b5 100644 --- a/vendor/github.com/panjf2000/ants/v2/multipool.go +++ b/vendor/github.com/panjf2000/ants/v2/multipool.go @@ -23,6 +23,7 @@ package ants import ( + "context" "errors" "fmt" "math" @@ -44,6 +45,43 @@ const ( LeastTasks ) +type contextReleaser interface { + ReleaseContext(ctx context.Context) error +} + +func releasePools(ctx context.Context, pools []contextReleaser) error { + errCh := make(chan error, len(pools)) + var wg errgroup.Group + for i, pool := range pools { + func(p contextReleaser, idx int) { + wg.Go(func() error { + err := p.ReleaseContext(ctx) + if err != nil { + err = fmt.Errorf("pool %d: %v", idx, err) + } + errCh <- err + return err + }) + }(pool, i) + } + + _ = wg.Wait() + + var errStr strings.Builder + for i := 0; i < len(pools); i++ { + if err := <-errCh; err != nil { + errStr.WriteString(err.Error()) + errStr.WriteString(" | ") + } + } + + if errStr.Len() == 0 { + return nil + } + + return errors.New(strings.TrimSuffix(errStr.String(), " | ")) +} + // MultiPool consists of multiple pools, from which you will benefit the // performance improvement on basis of the fine-grained locking that reduces // the lock 
contention. @@ -182,40 +220,23 @@ func (mp *MultiPool) IsClosed() bool { // ReleaseTimeout closes the multi-pool with a timeout, // it waits all pools to be closed before timing out. func (mp *MultiPool) ReleaseTimeout(timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return mp.ReleaseContext(ctx) +} + +// ReleaseContext closes the multi-pool with a context, +// it waits all pools to be closed before the context is done. +func (mp *MultiPool) ReleaseContext(ctx context.Context) error { if !atomic.CompareAndSwapInt32(&mp.state, OPENED, CLOSED) { return ErrPoolClosed } - errCh := make(chan error, len(mp.pools)) - var wg errgroup.Group - for i, pool := range mp.pools { - func(p *Pool, idx int) { - wg.Go(func() error { - err := p.ReleaseTimeout(timeout) - if err != nil { - err = fmt.Errorf("pool %d: %v", idx, err) - } - errCh <- err - return err - }) - }(pool, i) + pools := make([]contextReleaser, len(mp.pools)) + for i, p := range mp.pools { + pools[i] = p } - - _ = wg.Wait() - - var errStr strings.Builder - for i := 0; i < len(mp.pools); i++ { - if err := <-errCh; err != nil { - errStr.WriteString(err.Error()) - errStr.WriteString(" | ") - } - } - - if errStr.Len() == 0 { - return nil - } - - return errors.New(strings.TrimSuffix(errStr.String(), " | ")) + return releasePools(ctx, pools) } // Reboot reboots a released multi-pool. 
diff --git a/vendor/github.com/panjf2000/ants/v2/multipool_func.go b/vendor/github.com/panjf2000/ants/v2/multipool_func.go index 7b4b6e541..c2ceb13e3 100644 --- a/vendor/github.com/panjf2000/ants/v2/multipool_func.go +++ b/vendor/github.com/panjf2000/ants/v2/multipool_func.go @@ -23,14 +23,10 @@ package ants import ( - "errors" - "fmt" + "context" "math" - "strings" "sync/atomic" "time" - - "golang.org/x/sync/errgroup" ) // MultiPoolWithFunc consists of multiple pools, from which you will benefit the @@ -172,40 +168,23 @@ func (mp *MultiPoolWithFunc) IsClosed() bool { // ReleaseTimeout closes the multi-pool with a timeout, // it waits all pools to be closed before timing out. func (mp *MultiPoolWithFunc) ReleaseTimeout(timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return mp.ReleaseContext(ctx) +} + +// ReleaseContext closes the multi-pool with a context, +// it waits all pools to be closed before the context is done. +func (mp *MultiPoolWithFunc) ReleaseContext(ctx context.Context) error { if !atomic.CompareAndSwapInt32(&mp.state, OPENED, CLOSED) { return ErrPoolClosed } - errCh := make(chan error, len(mp.pools)) - var wg errgroup.Group - for i, pool := range mp.pools { - func(p *PoolWithFunc, idx int) { - wg.Go(func() error { - err := p.ReleaseTimeout(timeout) - if err != nil { - err = fmt.Errorf("pool %d: %v", idx, err) - } - errCh <- err - return err - }) - }(pool, i) - } - - _ = wg.Wait() - - var errStr strings.Builder - for i := 0; i < len(mp.pools); i++ { - if err := <-errCh; err != nil { - errStr.WriteString(err.Error()) - errStr.WriteString(" | ") - } + pools := make([]contextReleaser, len(mp.pools)) + for i, p := range mp.pools { + pools[i] = p } - - if errStr.Len() == 0 { - return nil - } - - return errors.New(strings.TrimSuffix(errStr.String(), " | ")) + return releasePools(ctx, pools) } // Reboot reboots a released multi-pool. 
diff --git a/vendor/github.com/panjf2000/ants/v2/multipool_func_generic.go b/vendor/github.com/panjf2000/ants/v2/multipool_func_generic.go index f5931e51b..2aad831ec 100644 --- a/vendor/github.com/panjf2000/ants/v2/multipool_func_generic.go +++ b/vendor/github.com/panjf2000/ants/v2/multipool_func_generic.go @@ -23,14 +23,10 @@ package ants import ( - "errors" - "fmt" + "context" "math" - "strings" "sync/atomic" "time" - - "golang.org/x/sync/errgroup" ) // MultiPoolWithFuncGeneric is the generic version of MultiPoolWithFunc. @@ -168,40 +164,23 @@ func (mp *MultiPoolWithFuncGeneric[T]) IsClosed() bool { // ReleaseTimeout closes the multi-pool with a timeout, // it waits all pools to be closed before timing out. func (mp *MultiPoolWithFuncGeneric[T]) ReleaseTimeout(timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return mp.ReleaseContext(ctx) +} + +// ReleaseContext closes the multi-pool with a context, +// it waits all pools to be closed before the context is done. 
+func (mp *MultiPoolWithFuncGeneric[T]) ReleaseContext(ctx context.Context) error { if !atomic.CompareAndSwapInt32(&mp.state, OPENED, CLOSED) { return ErrPoolClosed } - errCh := make(chan error, len(mp.pools)) - var wg errgroup.Group - for i, pool := range mp.pools { - func(p *PoolWithFuncGeneric[T], idx int) { - wg.Go(func() error { - err := p.ReleaseTimeout(timeout) - if err != nil { - err = fmt.Errorf("pool %d: %v", idx, err) - } - errCh <- err - return err - }) - }(pool, i) - } - - _ = wg.Wait() - - var errStr strings.Builder - for i := 0; i < len(mp.pools); i++ { - if err := <-errCh; err != nil { - errStr.WriteString(err.Error()) - errStr.WriteString(" | ") - } + pools := make([]contextReleaser, len(mp.pools)) + for i, p := range mp.pools { + pools[i] = p } - - if errStr.Len() == 0 { - return nil - } - - return errors.New(strings.TrimSuffix(errStr.String(), " | ")) + return releasePools(ctx, pools) } // Reboot reboots a released multi-pool. diff --git a/vendor/github.com/panjf2000/ants/v2/worker.go b/vendor/github.com/panjf2000/ants/v2/worker.go index 03b4bd70e..13eccbc31 100644 --- a/vendor/github.com/panjf2000/ants/v2/worker.go +++ b/vendor/github.com/panjf2000/ants/v2/worker.go @@ -24,7 +24,6 @@ package ants import ( "runtime/debug" - "time" ) // goWorker is the actual executor who runs the tasks, @@ -40,7 +39,7 @@ type goWorker struct { task chan func() // lastUsed will be updated when putting a worker back into queue. 
- lastUsed time.Time + lastUsed int64 } // run starts a goroutine to repeat the process @@ -82,11 +81,11 @@ func (w *goWorker) finish() { w.task <- nil } -func (w *goWorker) lastUsedTime() time.Time { +func (w *goWorker) lastUsedTime() int64 { return w.lastUsed } -func (w *goWorker) setLastUsedTime(t time.Time) { +func (w *goWorker) setLastUsedTime(t int64) { w.lastUsed = t } diff --git a/vendor/github.com/panjf2000/ants/v2/worker_func.go b/vendor/github.com/panjf2000/ants/v2/worker_func.go index 8437e40df..6b4575b22 100644 --- a/vendor/github.com/panjf2000/ants/v2/worker_func.go +++ b/vendor/github.com/panjf2000/ants/v2/worker_func.go @@ -24,7 +24,6 @@ package ants import ( "runtime/debug" - "time" ) // goWorkerWithFunc is the actual executor who runs the tasks, @@ -40,7 +39,7 @@ type goWorkerWithFunc struct { arg chan any // lastUsed will be updated when putting a worker back into queue. - lastUsed time.Time + lastUsed int64 } // run starts a goroutine to repeat the process @@ -82,11 +81,11 @@ func (w *goWorkerWithFunc) finish() { w.arg <- nil } -func (w *goWorkerWithFunc) lastUsedTime() time.Time { +func (w *goWorkerWithFunc) lastUsedTime() int64 { return w.lastUsed } -func (w *goWorkerWithFunc) setLastUsedTime(t time.Time) { +func (w *goWorkerWithFunc) setLastUsedTime(t int64) { w.lastUsed = t } diff --git a/vendor/github.com/panjf2000/ants/v2/worker_func_generic.go b/vendor/github.com/panjf2000/ants/v2/worker_func_generic.go index a76d109cb..7e4b3b26d 100644 --- a/vendor/github.com/panjf2000/ants/v2/worker_func_generic.go +++ b/vendor/github.com/panjf2000/ants/v2/worker_func_generic.go @@ -24,7 +24,6 @@ package ants import ( "runtime/debug" - "time" ) // goWorkerWithFunc is the actual executor who runs the tasks, @@ -43,7 +42,7 @@ type goWorkerWithFuncGeneric[T any] struct { exit chan struct{} // lastUsed will be updated when putting a worker back into queue. 
- lastUsed time.Time + lastUsed int64 } // run starts a goroutine to repeat the process @@ -87,10 +86,10 @@ func (w *goWorkerWithFuncGeneric[T]) finish() { w.exit <- struct{}{} } -func (w *goWorkerWithFuncGeneric[T]) lastUsedTime() time.Time { +func (w *goWorkerWithFuncGeneric[T]) lastUsedTime() int64 { return w.lastUsed } -func (w *goWorkerWithFuncGeneric[T]) setLastUsedTime(t time.Time) { +func (w *goWorkerWithFuncGeneric[T]) setLastUsedTime(t int64) { w.lastUsed = t } diff --git a/vendor/github.com/panjf2000/ants/v2/worker_loop_queue.go b/vendor/github.com/panjf2000/ants/v2/worker_loop_queue.go index ad62baea2..e3c9c0833 100644 --- a/vendor/github.com/panjf2000/ants/v2/worker_loop_queue.go +++ b/vendor/github.com/panjf2000/ants/v2/worker_loop_queue.go @@ -92,7 +92,7 @@ func (wq *loopQueue) detach() worker { } func (wq *loopQueue) refresh(duration time.Duration) []worker { - expiryTime := time.Now().Add(-duration) + expiryTime := time.Now().Add(-duration).UnixNano() index := wq.binarySearch(expiryTime) if index == -1 { return nil @@ -123,12 +123,12 @@ func (wq *loopQueue) refresh(duration time.Duration) []worker { return wq.expiry } -func (wq *loopQueue) binarySearch(expiryTime time.Time) int { +func (wq *loopQueue) binarySearch(expiryTime int64) int { var mid, nlen, basel, tmid int nlen = len(wq.items) // if no need to remove work, return -1 - if wq.isEmpty() || expiryTime.Before(wq.items[wq.head].lastUsedTime()) { + if wq.isEmpty() || expiryTime < wq.items[wq.head].lastUsedTime() { return -1 } @@ -150,7 +150,7 @@ func (wq *loopQueue) binarySearch(expiryTime time.Time) int { mid = l + ((r - l) >> 1) // avoid overflow when computing mid // calculate true mid position from mapped mid position tmid = (mid + basel + nlen) % nlen - if expiryTime.Before(wq.items[tmid].lastUsedTime()) { + if expiryTime < wq.items[tmid].lastUsedTime() { r = mid - 1 } else { l = mid + 1 diff --git a/vendor/github.com/panjf2000/ants/v2/worker_queue.go 
b/vendor/github.com/panjf2000/ants/v2/worker_queue.go index 948bc9144..f654d43bc 100644 --- a/vendor/github.com/panjf2000/ants/v2/worker_queue.go +++ b/vendor/github.com/panjf2000/ants/v2/worker_queue.go @@ -33,8 +33,8 @@ var errQueueIsFull = errors.New("the queue is full") type worker interface { run() finish() - lastUsedTime() time.Time - setLastUsedTime(t time.Time) + lastUsedTime() int64 + setLastUsedTime(t int64) inputFunc(func()) inputArg(any) } diff --git a/vendor/github.com/panjf2000/ants/v2/worker_stack.go b/vendor/github.com/panjf2000/ants/v2/worker_stack.go index 18dcd23b9..f7afa56fa 100644 --- a/vendor/github.com/panjf2000/ants/v2/worker_stack.go +++ b/vendor/github.com/panjf2000/ants/v2/worker_stack.go @@ -67,7 +67,7 @@ func (ws *workerStack) refresh(duration time.Duration) []worker { return nil } - expiryTime := time.Now().Add(-duration) + expiryTime := time.Now().Add(-duration).UnixNano() index := ws.binarySearch(0, n-1, expiryTime) ws.expiry = ws.expiry[:0] @@ -82,10 +82,10 @@ func (ws *workerStack) refresh(duration time.Duration) []worker { return ws.expiry } -func (ws *workerStack) binarySearch(l, r int, expiryTime time.Time) int { +func (ws *workerStack) binarySearch(l, r int, expiryTime int64) int { for l <= r { mid := l + ((r - l) >> 1) // avoid overflow when computing mid - if expiryTime.Before(ws.items[mid].lastUsedTime()) { + if expiryTime < ws.items[mid].lastUsedTime() { r = mid - 1 } else { l = mid + 1 diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index d19d390d3..cce3ef1d1 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -109,6 +109,24 @@ endif # Build variant:dockerfile pairs for shell iteration. DOCKERFILE_VARIANTS_WITH_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)):$(df)) +# Shell helper to check whether a dockerfile/arch pair is excluded. 
+define dockerfile_arch_is_excluded +case " $(DOCKERFILE_ARCH_EXCLUSIONS) " in \ + *" $$dockerfile:$(1) "*) true ;; \ + *) false ;; \ +esac +endef + +# Shell helper to check whether a registry/arch pair is excluded. +# Extracts registry from DOCKER_REPO (e.g., quay.io/prometheus -> quay.io) +define registry_arch_is_excluded +registry=$$(echo "$(DOCKER_REPO)" | cut -d'/' -f1); \ +case " $(DOCKER_REGISTRY_ARCH_EXCLUSIONS) " in \ + *" $$registry:$(1) "*) true ;; \ + *) false ;; \ +esac +endef + BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) @@ -250,6 +268,10 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ dockerfile=$${variant#*:}; \ variant_name=$${variant%%:*}; \ + if $(call dockerfile_arch_is_excluded,$*); then \ + echo "Skipping $$variant_name variant for linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ distroless_arch="$*"; \ if [ "$*" = "armv7" ]; then \ distroless_arch="arm"; \ @@ -284,6 +306,14 @@ $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ dockerfile=$${variant#*:}; \ variant_name=$${variant%%:*}; \ + if $(call dockerfile_arch_is_excluded,$*); then \ + echo "Skipping push for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$*); then \ + echo "Skipping push for $$variant_name variant on linux-$* to $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ echo "Pushing $$variant_name variant for linux-$*"; \ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ @@ -311,6 +341,14 @@ $(TAG_DOCKER_ARCHS): 
common-docker-tag-latest-%: @for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \ dockerfile=$${variant#*:}; \ variant_name=$${variant%%:*}; \ + if $(call dockerfile_arch_is_excluded,$*); then \ + echo "Skipping tag for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$*); then \ + echo "Skipping tag for $$variant_name variant on linux-$* for $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ echo "Tagging $$variant_name variant for linux-$* as latest"; \ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest-$$variant_name"; \ @@ -330,23 +368,87 @@ common-docker-manifest: variant_name=$${variant%%:*}; \ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ echo "Creating manifest for $$variant_name variant"; \ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name); \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping manifest for $$variant_name variant (no supported architectures)"; \ + continue; \ + fi; \ 
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $$refs; \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \ fi; \ if [ "$$dockerfile" = "Dockerfile" ]; then \ echo "Creating default variant ($$variant_name) manifest"; \ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)); \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping default variant manifest (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $$refs; \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"; \ fi; \ if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \ if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \ echo "Creating manifest for $$variant_name variant version tag"; \ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name); \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for $$variant_name version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \ + done; \ + if [ -z "$$refs" ]; then \ + echo "Skipping version-tag manifest for $$variant_name variant (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $$refs; \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \ fi; \ if [ "$$dockerfile" = "Dockerfile" ]; then \ echo "Creating default variant version tag manifest"; \ - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)); \ + refs=""; \ + for arch in $(DOCKER_ARCHS); do \ + if $(call dockerfile_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + if $(call registry_arch_is_excluded,$$arch); then \ + echo " Skipping $$arch for default variant version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \ + continue; \ + fi; \ + refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)"; \ + done; \ + if [ 
-z "$$refs" ]; then \ + echo "Skipping default variant version-tag manifest (no supported architectures)"; \ + continue; \ + fi; \ + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $$refs; \ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)"; \ fi; \ fi; \ diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go index 5f339b2d7..4730718f4 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 -// protoc v6.30.2 +// protoc-gen-go v1.36.10 +// protoc v7.34.1 // source: sigstore_common.proto package v1 @@ -51,8 +51,14 @@ const ( HashAlgorithm_SHA2_256 HashAlgorithm = 1 HashAlgorithm_SHA2_384 HashAlgorithm = 2 HashAlgorithm_SHA2_512 HashAlgorithm = 3 - HashAlgorithm_SHA3_256 HashAlgorithm = 4 - HashAlgorithm_SHA3_384 HashAlgorithm = 5 + // Used for LMS + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + HashAlgorithm_SHA3_256 HashAlgorithm = 4 + // Used for LMS + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + HashAlgorithm_SHA3_384 HashAlgorithm = 5 ) // Enum value maps for HashAlgorithm. @@ -161,9 +167,7 @@ const ( // LMS and LM-OTS // // These algorithms are deprecated and should not be used. - // Keys and signatures MAY be used by private Sigstore - // deployments, but will not be supported by the public - // good instance. + // There are no plans to support SLH-DSA at this time. // // USER WARNING: LMS and LM-OTS are both stateful signature schemes. 
// Using them correctly requires discretion and careful consideration @@ -179,18 +183,21 @@ const ( PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 // ML-DSA // - // These ML_DSA_65 and ML-DSA_87 algorithms are the pure variants that - // take data to sign rather than the prehash variants (HashML-DSA), which - // take digests. While considered quantum-resistant, their usage + // These ML_DSA_44, ML_DSA_65 and ML-DSA_87 algorithms are the pure variants + // that take data to sign rather than the prehash variants (HashML-DSA), which + // take digests. While considered quantum-resistant, their usage // involves tradeoffs in that signatures and keys are much larger, and // this makes deployments more costly. // - // USER WARNING: ML_DSA_65 and ML_DSA_87 are experimental algorithms. + // USER WARNING: ML_DSA_44, ML_DSA_65 and ML_DSA_87 are experimental algorithms. // In the future they MAY be used by private Sigstore deployments, but - // they are not yet fully functional. This warning will be removed when + // they are not yet fully functional. This warning will be removed when // these algorithms are widely supported by Sigstore clients and servers, // but care should still be taken for production environments. 
- PublicKeyDetails_ML_DSA_65 PublicKeyDetails = 21 // See NIST FIPS 204 + // + // See NIST FIPS 204, RFC 9881 for algorithm identifiers + PublicKeyDetails_ML_DSA_44 PublicKeyDetails = 23 + PublicKeyDetails_ML_DSA_65 PublicKeyDetails = 21 PublicKeyDetails_ML_DSA_87 PublicKeyDetails = 22 ) @@ -218,6 +225,7 @@ var ( 20: "PKIX_ECDSA_P521_SHA_256", 14: "LMS_SHA256", 15: "LMOTS_SHA256", + 23: "ML_DSA_44", 21: "ML_DSA_65", 22: "ML_DSA_87", } @@ -243,6 +251,7 @@ var ( "PKIX_ECDSA_P521_SHA_256": 20, "LMS_SHA256": 14, "LMOTS_SHA256": 15, + "ML_DSA_44": 23, "ML_DSA_65": 21, "ML_DSA_87": 22, } @@ -1062,160 +1071,95 @@ func (x *TimeRange) GetEnd() *timestamppb.Timestamp { var File_sigstore_common_proto protoreflect.FileDescriptor -var file_sigstore_common_proto_rawDesc = string([]byte{ - 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, - 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x69, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, - 0x43, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, - 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 
0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, - 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, - 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0d, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, - 0x23, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6b, - 0x65, 0x79, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, - 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x73, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd9, - 0x01, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, - 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x49, - 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 
0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0a, 0x6b, - 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, - 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, - 0x01, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x22, 0x29, 0x0a, 0x13, 0x50, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, - 0x0a, 0x19, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x6f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, - 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 
0x52, 0x03, 0x6f, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x58, 0x0a, - 0x11, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x33, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, - 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, - 0x16, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, - 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x18, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x64, 0x65, 
0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x63, 0x0a, - 0x14, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, - 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x73, 0x22, 0x78, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x12, 0x31, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x2a, 0x75, 0x0a, 0x0d, - 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1e, 0x0a, - 0x1a, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, - 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, - 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, - 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 
0x33, 0x5f, - 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, - 0x34, 0x10, 0x05, 0x2a, 0x8f, 0x05, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, - 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, - 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, - 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, - 0x35, 0x10, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x15, 0x0a, 0x0d, 0x50, 0x4b, 0x43, 0x53, 0x31, - 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x02, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x18, - 0x0a, 0x10, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, - 0x56, 0x35, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, - 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x21, - 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, - 0x56, 0x31, 0x35, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x09, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, - 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, - 0x35, 0x36, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, - 0x32, 0x35, 0x36, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, - 0x41, 0x5f, 0x50, 0x53, 
0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, - 0x36, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, - 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x12, 0x12, 0x24, 0x0a, 0x1c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, - 0x50, 0x32, 0x35, 0x36, 0x5f, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, - 0x36, 0x10, 0x06, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, - 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, - 0x35, 0x36, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x33, 0x38, 0x34, 0x10, - 0x0c, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, - 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x0d, 0x12, 0x10, - 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x07, - 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, - 0x5f, 0x50, 0x48, 0x10, 0x08, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, - 0x44, 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, 0x36, - 0x10, 0x13, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, - 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, - 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x12, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x4c, - 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x1a, 0x02, 0x08, - 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 
0x41, 0x5f, 0x36, 0x35, 0x10, 0x15, - 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x38, 0x37, 0x10, 0x16, 0x22, - 0x04, 0x08, 0x17, 0x10, 0x32, 0x2a, 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, - 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, - 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, - 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, - 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, - 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_sigstore_common_proto_rawDesc = "" + + "\n" + + "\x15sigstore_common.proto\x12\x16dev.sigstore.common.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"i\n" + + "\n" + + "HashOutput\x12C\n" + + "\talgorithm\x18\x01 
\x01(\x0e2%.dev.sigstore.common.v1.HashAlgorithmR\talgorithm\x12\x16\n" + + "\x06digest\x18\x02 \x01(\fR\x06digest\"\x80\x01\n" + + "\x10MessageSignature\x12I\n" + + "\x0emessage_digest\x18\x01 \x01(\v2\".dev.sigstore.common.v1.HashOutputR\rmessageDigest\x12!\n" + + "\tsignature\x18\x02 \x01(\fB\x03\xe0A\x02R\tsignature\"#\n" + + "\x05LogId\x12\x1a\n" + + "\x06key_id\x18\x01 \x01(\fB\x03\xe0A\x02R\x05keyId\"H\n" + + "\x16RFC3161SignedTimestamp\x12.\n" + + "\x10signed_timestamp\x18\x01 \x01(\fB\x03\xe0A\x02R\x0fsignedTimestamp\"\xd9\x01\n" + + "\tPublicKey\x12 \n" + + "\traw_bytes\x18\x01 \x01(\fH\x00R\brawBytes\x88\x01\x01\x12I\n" + + "\vkey_details\x18\x02 \x01(\x0e2(.dev.sigstore.common.v1.PublicKeyDetailsR\n" + + "keyDetails\x12C\n" + + "\tvalid_for\x18\x03 \x01(\v2!.dev.sigstore.common.v1.TimeRangeH\x01R\bvalidFor\x88\x01\x01B\f\n" + + "\n" + + "_raw_bytesB\f\n" + + "\n" + + "_valid_for\")\n" + + "\x13PublicKeyIdentifier\x12\x12\n" + + "\x04hint\x18\x01 \x01(\tR\x04hint\"'\n" + + "\x10ObjectIdentifier\x12\x13\n" + + "\x02id\x18\x01 \x03(\x05B\x03\xe0A\x02R\x02id\"m\n" + + "\x19ObjectIdentifierValuePair\x12:\n" + + "\x03oid\x18\x01 \x01(\v2(.dev.sigstore.common.v1.ObjectIdentifierR\x03oid\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"X\n" + + "\x11DistinguishedName\x12\"\n" + + "\forganization\x18\x01 \x01(\tR\forganization\x12\x1f\n" + + "\vcommon_name\x18\x02 \x01(\tR\n" + + "commonName\"3\n" + + "\x0fX509Certificate\x12 \n" + + "\traw_bytes\x18\x01 \x01(\fB\x03\xe0A\x02R\brawBytes\"\x9e\x01\n" + + "\x16SubjectAlternativeName\x12F\n" + + "\x04type\x18\x01 \x01(\x0e22.dev.sigstore.common.v1.SubjectAlternativeNameTypeR\x04type\x12\x18\n" + + "\x06regexp\x18\x02 \x01(\tH\x00R\x06regexp\x12\x16\n" + + "\x05value\x18\x03 \x01(\tH\x00R\x05valueB\n" + + "\n" + + "\bidentity\"c\n" + + "\x14X509CertificateChain\x12K\n" + + "\fcertificates\x18\x01 \x03(\v2'.dev.sigstore.common.v1.X509CertificateR\fcertificates\"x\n" + + "\tTimeRange\x120\n" + + "\x05start\x18\x01 
\x01(\v2\x1a.google.protobuf.TimestampR\x05start\x121\n" + + "\x03end\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampH\x00R\x03end\x88\x01\x01B\x06\n" + + "\x04_end*}\n" + + "\rHashAlgorithm\x12\x1e\n" + + "\x1aHASH_ALGORITHM_UNSPECIFIED\x10\x00\x12\f\n" + + "\bSHA2_256\x10\x01\x12\f\n" + + "\bSHA2_384\x10\x02\x12\f\n" + + "\bSHA2_512\x10\x03\x12\x10\n" + + "\bSHA3_256\x10\x04\x1a\x02\b\x01\x12\x10\n" + + "\bSHA3_384\x10\x05\x1a\x02\b\x01*\x9e\x05\n" + + "\x10PublicKeyDetails\x12\"\n" + + "\x1ePUBLIC_KEY_DETAILS_UNSPECIFIED\x10\x00\x12\x19\n" + + "\x11PKCS1_RSA_PKCS1V5\x10\x01\x1a\x02\b\x01\x12\x15\n" + + "\rPKCS1_RSA_PSS\x10\x02\x1a\x02\b\x01\x12\x18\n" + + "\x10PKIX_RSA_PKCS1V5\x10\x03\x1a\x02\b\x01\x12\x14\n" + + "\fPKIX_RSA_PSS\x10\x04\x1a\x02\b\x01\x12!\n" + + "\x1dPKIX_RSA_PKCS1V15_2048_SHA256\x10\t\x12!\n" + + "\x1dPKIX_RSA_PKCS1V15_3072_SHA256\x10\n" + + "\x12!\n" + + "\x1dPKIX_RSA_PKCS1V15_4096_SHA256\x10\v\x12\x1c\n" + + "\x18PKIX_RSA_PSS_2048_SHA256\x10\x10\x12\x1c\n" + + "\x18PKIX_RSA_PSS_3072_SHA256\x10\x11\x12\x1c\n" + + "\x18PKIX_RSA_PSS_4096_SHA256\x10\x12\x12$\n" + + "\x1cPKIX_ECDSA_P256_HMAC_SHA_256\x10\x06\x1a\x02\b\x01\x12\x1b\n" + + "\x17PKIX_ECDSA_P256_SHA_256\x10\x05\x12\x1b\n" + + "\x17PKIX_ECDSA_P384_SHA_384\x10\f\x12\x1b\n" + + "\x17PKIX_ECDSA_P521_SHA_512\x10\r\x12\x10\n" + + "\fPKIX_ED25519\x10\a\x12\x13\n" + + "\x0fPKIX_ED25519_PH\x10\b\x12\x1f\n" + + "\x17PKIX_ECDSA_P384_SHA_256\x10\x13\x1a\x02\b\x01\x12\x1f\n" + + "\x17PKIX_ECDSA_P521_SHA_256\x10\x14\x1a\x02\b\x01\x12\x12\n" + + "\n" + + "LMS_SHA256\x10\x0e\x1a\x02\b\x01\x12\x14\n" + + "\fLMOTS_SHA256\x10\x0f\x1a\x02\b\x01\x12\r\n" + + "\tML_DSA_44\x10\x17\x12\r\n" + + "\tML_DSA_65\x10\x15\x12\r\n" + + "\tML_DSA_87\x10\x16\"\x04\b\x18\x102*o\n" + + "\x1aSubjectAlternativeNameType\x12-\n" + + ")SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED\x10\x00\x12\t\n" + + "\x05EMAIL\x10\x01\x12\a\n" + + "\x03URI\x10\x02\x12\x0e\n" + + "\n" + + "OTHER_NAME\x10\x03B|\n" + + 
"\x1cdev.sigstore.proto.common.v1B\vCommonProtoP\x01Z6github.com/sigstore/protobuf-specs/gen/pb-go/common/v1\xea\x02\x14Sigstore::Common::V1b\x06proto3" var ( file_sigstore_common_proto_rawDescOnce sync.Once diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go index 02c032b02..802256ac6 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go @@ -62,7 +62,7 @@ type AlgorithmDetails struct { // The underlying type of these parameters is dependent on the keyType. // For example, ECDSA algorithms will store an elliptic curve here whereas, RSA keys will store the key size. // Algorithms that don't require any extra parameters leave this set to nil. - extraKeyParams interface{} + extraKeyParams any // flagValue is a string representation of the signature algorithm that follows the naming conventions of CLI // arguments that are used for Sigstore services. 
@@ -157,7 +157,7 @@ var supportedAlgorithms = []AlgorithmDetails{ {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pkcs1-4096-sha256"}, {v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pss-2048-sha256"}, {v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pss-3072-sha256"}, - {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4092-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4096-sha256"}, {v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P256(), "ecdsa-sha2-256-nistp256"}, {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, ECDSA, crypto.SHA384, v1.HashAlgorithm_SHA2_384, elliptic.P384(), "ecdsa-sha2-384-nistp384"}, {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P384(), "ecdsa-sha2-256-nistp384"}, //nolint:staticcheck diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go index 44771ff3d..bd715b0c5 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go @@ -21,18 +21,14 @@ import ( "errors" "fmt" "io" + "slices" ) func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool { if supportedAlgs == nil { return true } - for _, supportedAlg := range supportedAlgs { - if alg == supportedAlg { - return true - } - } - return false + return slices.Contains(supportedAlgs, alg) } // ComputeDigestForSigning calculates the digest value for the specified message using a hash function selected by the following process: 
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go index cab6f5b98..58cbff797 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -29,8 +29,8 @@ const CosignSignatureType = "cosign container image signature" // SimpleContainerImage describes the structure of a basic container image signature payload, as defined at: // https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format type SimpleContainerImage struct { - Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature - Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image + Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature + Optional map[string]any `json:"optional"` // Optional optional metadata about the image } // Critical data critical to correctly evaluating the validity of a signature @@ -65,7 +65,7 @@ type Cosign struct { // - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users // to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them) ClaimedIdentity string - Annotations map[string]interface{} + Annotations map[string]any } // SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go index 3f8beff49..e4d7c4190 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go @@ -28,7 +28,7 @@ import ( ) // 
SignImage signs a container manifest using the specified signer object -func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]interface{}) (payload, signature []byte, err error) { +func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map[string]any) (payload, signature []byte, err error) { imgPayload := sigpayload.Cosign{ Image: image, Annotations: optionalAnnotations, @@ -45,7 +45,7 @@ func SignImage(signer SignerVerifier, image name.Digest, optionalAnnotations map } // VerifyImageSignature verifies a signature over a container manifest -func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]interface{}, err error) { +func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (image name.Digest, annotations map[string]any, err error) { if err := signer.VerifySignature(bytes.NewReader(signature), bytes.NewReader(payload)); err != nil { return name.Digest{}, nil, fmt.Errorf("signature verification failed: %w", err) } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/version.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/version.go index 2bdc4f06e..0dec25fc7 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/version.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/version.go @@ -4,4 +4,4 @@ package prometheus // import "go.opentelemetry.io/contrib/bridges/prometheus" // Version is the current release version of the prometheus bridge. 
-const Version = "0.65.0" +const Version = "0.68.0" diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go index ab850d310..d2e8df0c9 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go @@ -199,7 +199,7 @@ func init() { host := getenv("OTEL_EXPORTER_PROMETHEUS_HOST", "localhost") port := getenv("OTEL_EXPORTER_PROMETHEUS_PORT", "9464") addr := host + ":" + port - lis, err := net.Listen("tcp", addr) + lis, err := (&net.ListenConfig{}).Listen(context.Background(), "tcp", addr) if err != nil { return nil, errors.Join( fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), @@ -295,7 +295,7 @@ func (pr producerRegistry) create(ctx context.Context) ([]metric.Producer, error func dedupedMetricProducers(envValue string) []string { producers := make(map[string]struct{}) - for _, producer := range strings.Split(envValue, ",") { + for producer := range strings.SplitSeq(envValue, ",") { producers[producer] = struct{}{} } diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/version.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/version.go new file mode 100644 index 000000000..6cb53c94f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/version.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package autoexport // import "go.opentelemetry.io/contrib/exporters/autoexport" + +// Version is the current release version of the autoexport module. 
+const Version = "0.68.0" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index f7338b7e5..a7d4b2a81 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -194,6 +194,9 @@ func WithServerName(server string) Option { // WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. // These attributes will be included in metrics for every request. +// +// Deprecated: WithMetricAttributesFn is deprecated and will be removed in a +// future release. Use [Labeler] instead. func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { return optionFunc(func(c *config) { c.MetricAttributesFn = metricAttributesFn diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 1ecd4be2d..a269fce0f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -184,30 +184,26 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http statusCode := rww.StatusCode() bytesWritten := rww.BytesWritten() span.SetStatus(h.semconv.Status(statusCode)) + bytesRead := bw.BytesRead() span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ StatusCode: statusCode, - ReadBytes: bw.BytesRead(), + ReadBytes: bytesRead, ReadError: bw.Error(), WriteBytes: bytesWritten, WriteError: rww.Error(), })...) - // Use floating point division here for higher precision (instead of Millisecond method). 
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - - metricAttributes := semconv.MetricAttributes{ - Req: r, - StatusCode: statusCode, - AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), - } - h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ - ServerName: h.server, - ResponseSize: bytesWritten, - MetricAttributes: metricAttributes, + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + }, MetricData: semconv.MetricData{ - RequestSize: bw.BytesRead(), - ElapsedTime: elapsedTime, + RequestSize: bytesRead, + RequestDuration: time.Since(requestStartTime), }, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index ca2e4c14c..f29f9b7c9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -61,7 +61,7 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) { // WriteHeader persists initial statusCode for span attribution. // All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. +// and will persist the statusCode from the first call (except for informational response status codes). // Blocking consecutive calls to WriteHeader alters expected behavior and will // remove warning logs from net/http where developers will notice incorrect handler implementations. 
func (w *RespWriterWrapper) WriteHeader(statusCode int) { @@ -77,6 +77,13 @@ func (w *RespWriterWrapper) WriteHeader(statusCode int) { // parent method. func (w *RespWriterWrapper) writeHeader(statusCode int) { if !w.wroteHeader { + // Ignore informational response status codes. + // Based on https://github.com/golang/go/blob/go1.24.1/src/net/http/server.go#L1216 + if statusCode >= 100 && statusCode <= 199 && statusCode != http.StatusSwitchingProtocols { + w.ResponseWriter.WriteHeader(statusCode) + return + } + w.wroteHeader = true w.statusCode = statusCode } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go index 29d6f508c..1398d85c2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go @@ -19,11 +19,11 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/httpconv" + "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/httpconv" ) -type HTTPClient struct{ +type HTTPClient struct { requestBodySize httpconv.ClientRequestBodySize requestDuration httpconv.ClientRequestDuration } @@ -57,14 +57,14 @@ func (n HTTPClient) Status(code int) (codes.Code, string) { // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
func (n HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* - below attributes are returned: - - http.request.method - - http.request.method.original - - url.full - - server.address - - server.port - - network.protocol.name - - network.protocol.version + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version */ numOfAttributes := 3 // URL, server address, proto, and method. @@ -139,9 +139,9 @@ func (n HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. func (n HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* - below attributes are returned: - - http.response.status_code - - error.type + below attributes are returned: + - http.response.status_code + - error.type */ var count int if resp.StatusCode > 0 { @@ -247,22 +247,26 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (n HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { - opts := map[string]MetricOpts{} - +func (n HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { attributes := n.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["new"] = MetricOpts{ + + return MetricOpts{ measurement: set, addOptions: set, } - - return opts } -func (n HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { - n.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) - n.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) +func (n HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + defer func() { + 
*recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + }() + *recordOpts = append(*recordOpts, opts.MeasurementOption()) + + n.requestBodySize.Inst().Record(ctx, md.RequestSize, *recordOpts...) + n.requestDuration.Inst().Record(ctx, durationToSeconds(md.RequestDuration), *recordOpts...) } // TraceAttributes returns attributes for httptrace. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go index e0e9ebc05..83c6ae246 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go @@ -15,12 +15,13 @@ import ( "slices" "strings" "sync" + "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/httpconv" + "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/httpconv" ) type RequestTraceAttrsOpts struct { @@ -36,7 +37,7 @@ type ResponseTelemetry struct { WriteError error } -type HTTPServer struct{ +type HTTPServer struct { requestBodySizeHistogram httpconv.ServerRequestBodySize responseBodySizeHistogram httpconv.ServerResponseBodySize requestDurationHistogram httpconv.ServerRequestDuration @@ -245,19 +246,11 @@ type MetricAttributes struct { } type MetricData struct { - RequestSize int64 - - // The request duration, in milliseconds - ElapsedTime float64 + RequestSize int64 + RequestDuration time.Duration } var ( - metricAddOptionPool = &sync.Pool{ - New: func() any { - return &[]metric.AddOption{} - }, - } - metricRecordOptionPool = &sync.Pool{ New: func() any { return &[]metric.RecordOption{} @@ -272,7 +265,7 @@ func (n HTTPServer) RecordMetrics(ctx context.Context, md 
ServerMetricData) { *recordOpts = append(*recordOpts, o) n.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...) n.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...) - n.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) + n.requestDurationHistogram.Inst().Record(ctx, durationToSeconds(md.RequestDuration), o) *recordOpts = (*recordOpts)[:0] metricRecordOptionPool.Put(recordOpts) } @@ -371,10 +364,12 @@ func (n HTTPServer) MetricAttributes(server string, req *http.Request, statusCod if statusCode > 0 { num++ } - + if route == "" && req.Pattern != "" { + route = httpRoute(req.Pattern) + } if route != "" { - num++ - } + num++ + } attributes := slices.Grow(additionalAttributes, num) attributes = append(attributes, @@ -397,7 +392,7 @@ func (n HTTPServer) MetricAttributes(server string, req *http.Request, statusCod } if route != "" { - attributes = append(attributes, semconv.HTTPRoute(route)) - } + attributes = append(attributes, semconv.HTTPRoute(route)) + } return attributes } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index 131fda489..2eab2ecab 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -11,10 +11,11 @@ import ( "net/http" "strconv" "strings" + "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.39.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.40.0" ) // SplitHostPort splits a network address hostport of the form "host", @@ -125,3 +126,8 @@ func standardizeHTTPMethod(method string) string { } return method } + +func durationToSeconds(d time.Duration) float64 { + // Use floating point division here for 
higher precision (instead of Seconds method). + return float64(d) / float64(time.Second) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 59b6c5498..d8d204d1f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -5,7 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http import ( "context" - "fmt" "io" "net/http" "net/http/httptrace" @@ -16,7 +15,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - otelsemconv "go.opentelemetry.io/otel/semconv/v1.39.0" + otelsemconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" @@ -85,8 +84,6 @@ func defaultTransportFormatter(_ string, r *http.Request) string { // RoundTrip creates a Span and propagates its context via the provided request's headers // before handing the request to the configured base RoundTripper. The created span will // end when the response body is closed or when a read from the body returns io.EOF. -// If GetBody returns an error, the error is reported via otel.Handle and the request -// continues with the original Body. func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { requestStartTime := time.Now() for _, f := range t.filters { @@ -106,9 +103,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } } - opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options - - ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...) + ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), t.spanStartOptions...) 
if t.clientTrace != nil { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) @@ -121,23 +116,26 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // GetBody is preferred over direct access to Body if the function is set. - // If the resulting body is nil or is NoBody, we don't want to mutate the body as it - // will affect the identity of it in an unforeseeable way because we assert - // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. - body := r.Body - if r.GetBody != nil { - b, err := r.GetBody() - if err != nil { - otel.Handle(fmt.Errorf("http.Request GetBody returned an error: %w", err)) - } else { - body = b + var lastBW *request.BodyWrapper // Records the last body wrapper. Can be nil. + maybeWrapBody := func(body io.ReadCloser) io.ReadCloser { + if body == nil || body == http.NoBody { + return body } + bw := request.NewBodyWrapper(body, func(int64) {}) + lastBW = bw + return bw } - - bw := request.NewBodyWrapper(body, func(int64) {}) - if body != nil && body != http.NoBody { - r.Body = bw + r.Body = maybeWrapBody(r.Body) + if r.GetBody != nil { + originalGetBody := r.GetBody + r.GetBody = func() (io.ReadCloser, error) { + b, err := originalGetBody() + if err != nil { + lastBW = nil // The underlying transport will fail to make a retry request, hence, record no data. + return nil, err + } + return maybeWrapBody(b), nil + } } span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) @@ -145,35 +143,27 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { res, err := t.rt.RoundTrip(r) - // Defer metrics recording function to record the metrics on error or no error. - defer func() { - metricAttributes := semconv.MetricAttributes{ + // Record the metrics on error or no error. 
+ statusCode := 0 + if err == nil { + statusCode = res.StatusCode + } + var requestSize int64 + if lastBW != nil { + requestSize = lastBW.BytesRead() + } + t.semconv.RecordMetrics( + ctx, + semconv.MetricData{ + RequestSize: requestSize, + RequestDuration: time.Since(requestStartTime), + }, + t.semconv.MetricOptions(semconv.MetricAttributes{ Req: r, + StatusCode: statusCode, AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), - } - - if err == nil { - metricAttributes.StatusCode = res.StatusCode - } - - metricOpts := t.semconv.MetricOptions(metricAttributes) - - metricData := semconv.MetricData{ - RequestSize: bw.BytesRead(), - } - - if err == nil { - readRecordFunc := func(int64) {} - res.Body = newWrappedBody(span, readRecordFunc, res.Body) - } - - // Use floating point division here for higher precision (instead of Millisecond method). - elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - - metricData.ElapsedTime = elapsedTime - - t.semconv.RecordMetrics(ctx, metricData, metricOpts) - }() + }), + ) if err != nil { span.SetAttributes(otelsemconv.ErrorType(err)) @@ -183,6 +173,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } + readRecordFunc := func(int64) {} + res.Body = newWrappedBody(span, readRecordFunc, res.Body) // traces span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) 
span.SetStatus(t.semconv.Status(res.StatusCode)) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index d0107952e..835ec5aa7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -4,4 +4,4 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. -const Version = "0.65.0" +const Version = "0.68.0" diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d48722875..db1f55101 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -17,6 +17,7 @@ linters: - ineffassign - misspell - modernize + - noctx - perfsprint - revive - staticcheck @@ -88,6 +89,16 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + semconv: + list-mode: lax + files: + - "!**/semconv/**" + - "!**/exporters/zipkin/**" + deny: + - pkg: go.opentelemetry.io/otel/semconv + desc: "Use go.opentelemetry.io/otel/semconv/v1.40.0 instead. If a newer semconv version has been released, update the depguard rule." 
+ allow: + - go.opentelemetry.io/otel/semconv/v1.40.0 gocritic: disabled-checks: - appendAssign @@ -194,6 +205,7 @@ linters: arguments: - ["ID"] # AllowList - ["Otel", "Aws", "Gcp"] # DenyList + - - skip-package-name-collision-with-go-std: true - name: waitgroup-by-value testifylint: enable-all: true diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index e725282be..20edda441 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,90 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.43.0/0.65.0/0.19.0] 2026-04-02 + +### Added + +- Add `IsRandom` and `WithRandom` on `TraceFlags`, and `IsRandom` on `SpanContext` in `go.opentelemetry.io/otel/trace` for [W3C Trace Context Level 2 Random Trace ID Flag](https://www.w3.org/TR/trace-context-2/#random-trace-id-flag) support. (#8012) +- Add service detection with `WithService` in `go.opentelemetry.io/otel/sdk/resource`. (#7642) +- Add `DefaultWithContext` and `EnvironmentWithContext` in `go.opentelemetry.io/otel/sdk/resource` to support plumbing `context.Context` through default and environment detectors. (#8051) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8038) +- Add support for per-series start time tracking for cumulative metrics in `go.opentelemetry.io/otel/sdk/metric`. + Set `OTEL_GO_X_PER_SERIES_START_TIMESTAMPS=true` to enable. (#8060) +- Add `WithCardinalityLimitSelector` for metric reader for configuring cardinality limits specific to the instrument kind. (#7855) + +### Changed + +- Introduce the `EMPTY` Type in `go.opentelemetry.io/otel/attribute` to reflect that an empty value is now a valid value, with `INVALID` remaining as a deprecated alias of `EMPTY`. (#8038) +- Improve slice handling in `go.opentelemetry.io/otel/attribute` to optimize short slice values with fixed-size fast paths. (#8039) +- Improve performance of span metric recording in `go.opentelemetry.io/otel/sdk/trace` by returning early if self-observability is not enabled. (#8067) +- Improve formatting of metric data diffs in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8073) + +### Deprecated + +- Deprecate `INVALID` in `go.opentelemetry.io/otel/attribute`. Use `EMPTY` instead. (#8038) + +### Fixed + +- Return spec-compliant `TraceIdRatioBased` description. This is a breaking behavioral change, but it is necessary to + make the implementation [spec-compliant](https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased). (#8027) +- Fix a race condition in `go.opentelemetry.io/otel/sdk/metric` where the lastvalue aggregation could collect the value 0 even when no zero-value measurements were recorded. (#8056) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. 
+ Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `kenv` command on BSD. (#8113) +- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to correctly handle HTTP2 GOAWAY frame. (#8096) + +## [1.42.0/0.64.0/0.18.0/0.0.16] 2026-03-06 + +### Added + +- Add `go.opentelemetry.io/otel/semconv/v1.40.0` package. + The package contains semantic conventions from the `v1.40.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.40.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.39.0`. (#7985) +- Add `Err` and `SetErr` on `Record` in `go.opentelemetry.io/otel/log` to attach an error and set record exception attributes in `go.opentelemetry.io/otel/log/sdk`. (#7924) + +### Changed + +- `TracerProvider.ForceFlush` in `go.opentelemetry.io/otel/sdk/trace` joins errors together and continues iteration through SpanProcessors as opposed to returning the first encountered error without attempting exports on subsequent SpanProcessors. (#7856) + +### Fixed + +- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to correctly handle HTTP2 GOAWAY frame. (#7931) +- Fix semconv v1.39.0 generated metric helpers skipping required attributes when extra attributes were empty. 
(#7964) +- Preserve W3C TraceFlags bitmask (including the random Trace ID flag) during trace context extraction and injection in `go.opentelemetry.io/otel/propagation`. (#7834) + +### Removed + +- Drop support for [Go 1.24]. (#7984) + +## [1.41.0/0.63.0/0.17.0/0.0.15] 2026-03-02 + +This release is the last to support [Go 1.24]. +The next release will require at least [Go 1.25]. + +### Added + +- Support testing of [Go 1.26]. (#7902) + +### Fixed + +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` and `Parse` and `New` in `go.opentelemetry.io/otel/baggage` to comply with W3C Baggage specification limits. + `New` and `Parse` now return partial baggage along with an error when limits are exceeded. + Errors from baggage extraction are reported to the global error handler. (#7880) +- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7914) +- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7914) +- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7914) + ## [1.40.0/0.62.0/0.16.0] 2026-02-02 ### Added @@ -3535,7 +3619,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.40.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.43.0...HEAD +[1.43.0/0.65.0/0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.43.0 +[1.42.0/0.64.0/0.18.0/0.0.16]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.42.0 +[1.41.0/0.63.0/0.17.0/0.0.15]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.41.0 [1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0 [1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 [1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 @@ -3635,6 +3722,7 @@ It contains api and sdk for trace and meter. +[Go 1.26]: https://go.dev/doc/go1.26 [Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 38dede932..12de3607a 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -746,8 +746,8 @@ Encapsulate setup in constructor functions, ensuring clear ownership and scope: import ( "errors" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) type SDKComponent struct { @@ -1039,7 +1039,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). 
-Use the metric semantic conventions convenience package [otelconv](./semconv/v1.39.0/otelconv/metric.go). +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.40.0/otelconv/metric.go). ##### Component Identification diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 44870248c..42466f2d6 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -38,10 +38,14 @@ CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit +SEMCONVKIT_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/semconvkit -type f)) $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +$(TOOLS)/semconvkit: $(SEMCONVKIT_FILES) VERIFYREADMES = $(TOOLS)/verifyreadmes +VERIFYREADMES_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/verifyreadmes -type f)) $(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes +$(TOOLS)/verifyreadmes: $(VERIFYREADMES_FILES) GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint @@ -185,11 +189,10 @@ test-coverage: $(GOCOVMERGE) .PHONY: benchmark benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ - && cd $* \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. + cd $* && $(GO) test -run='^$$' -bench=. $(ARGS) ./... 
+ +print-sharded-benchmarks: + @echo $(OTEL_GO_MOD_DIRS) | jq -cR 'split(" ")' .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix @@ -215,7 +218,7 @@ go-mod-tidy/%: crosslink && $(GO) mod tidy -compat=1.21 .PHONY: lint -lint: misspell go-mod-tidy golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint .PHONY: vanity-import-check vanity-import-check: $(PORTO) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index c63359543..16a72004c 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -53,20 +53,20 @@ Currently, this project supports the following environments. | OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.26 | amd64 | | Ubuntu | 1.25 | amd64 | -| Ubuntu | 1.24 | amd64 | +| Ubuntu | 1.26 | 386 | | Ubuntu | 1.25 | 386 | -| Ubuntu | 1.24 | 386 | +| Ubuntu | 1.26 | arm64 | | Ubuntu | 1.25 | arm64 | -| Ubuntu | 1.24 | arm64 | +| macOS | 1.26 | amd64 | | macOS | 1.25 | amd64 | -| macOS | 1.24 | amd64 | +| macOS | 1.26 | arm64 | | macOS | 1.25 | arm64 | -| macOS | 1.24 | arm64 | +| Windows | 1.26 | amd64 | | Windows | 1.25 | amd64 | -| Windows | 1.24 | amd64 | +| Windows | 1.26 | 386 | | Windows | 1.25 | 386 | -| Windows | 1.24 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 861756fd7..6aff7548c 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -4,7 +4,9 @@ Create a `Version Release` issue to track the release process. -## Semantic Convention Generation +## Semantic Convention Upgrade + +### Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. 
The `semconv-generate` make target is used for this. @@ -22,6 +24,43 @@ make semconv-generate # Uses the exported TAG. This should create a new sub-package of [`semconv`](./semconv). Ensure things look correct before submitting a pull request to include the addition. +The `CHANGELOG.md` should also be updated to reflect the new changes: + +```md +- The `go.opentelemetry.io/otel/semconv/` package. The package contains semantic conventions from the `` version of the OpenTelemetry Semantic Conventions. See the [migration documentation](./semconv//MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/`. (#PR_NUMBER) +``` + +> **Tip:** Change to the release and prior version to match the changes + +### Update semconv imports + +Once the new semconv module has been generated, update all semconv imports throughout the codebase to reference the new version: + +```go +// Before +semconv "go.opentelemetry.io/otel/semconv/v1.37.0" +"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + + +// After +semconv "go.opentelemetry.io/otel/semconv/v1.39.0" +"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" +``` + +Once complete, run `make` to check for any compilation or test failures. + +#### Handling attribute changes + +Some semconv releases might add new attributes or impact attributes that are currently being used. Changes could stem from a simple renaming, to more complex changes like merging attributes and property values being changed. + +One should update the code to the new attributes that supersede the impacted ones, hence sticking to the semantic conventions. However, legacy attributes might still be emitted in accordance to the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable. + +For an example on how such migration might have to be tracked and performed, see issue [#7806](https://github.com/open-telemetry/opentelemetry-go/issues/7806). 
+ +### Go contrib linter update + +Update [.golangci.yml](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/.golangci.yml) in [opentelemetry-go-contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/) to mandate the new semconv version. + ## Breaking changes validation You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6cc1a1655..771dd69c5 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -53,7 +53,7 @@ var ( _ Encoder = &defaultAttrEncoder{} // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 + encoderIDCounter atomic.Uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() @@ -64,7 +64,7 @@ var ( // once per each type of attribute encoder. Preferably in init() or in var // definition. func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} + return EncoderID{value: encoderIDCounter.Add(1)} } // DefaultEncoder returns an attribute encoder that encodes attributes in such diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go index 6aa69aeae..b09caaa6d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/hash.go +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -27,6 +27,7 @@ const ( int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) + emptyID uint64 = 7305809155345288421 // "__empty_" (little endian) ) // hashKVs returns a new xxHash64 hash of kvs. 
@@ -80,7 +81,8 @@ func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { for i := 0; i < rv.Len(); i++ { h = h.String(rv.Index(i).String()) } - case INVALID: + case EMPTY: + h = h.Uint64(emptyID) default: // Logging is an alternative, but using the internal logger here // causes an import cycle so it is not done. diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 7f5eae877..d9f51fa2d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -11,80 +11,63 @@ import ( "reflect" ) -// BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() +// sliceElem is the exact set of element types stored in attribute slice values. +// Using a closed set prevents accidental instantiations for unsupported types. +type sliceElem interface { + bool | int64 | float64 | string } -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} +// SliceValue converts a slice into an array with the same elements. +func SliceValue[T sliceElem](v []T) any { + // Keep only the common tiny-slice cases out of reflection. 
Extending this + // much further increases code size for diminishing benefit while larger + // slices still need the generic reflective path to preserve comparability. + // This matches the short lengths that show up most often in local + // benchmarks and semantic convention examples while leaving larger, less + // predictable slices on the generic reflective path. + switch len(v) { + case 0: + return [0]T{} + case 1: + return [1]T{v[0]} + case 2: + return [2]T{v[0], v[1]} + case 3: + return [3]T{v[0], v[1], v[2]} + } -// StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() + return sliceValueReflect(v) } -// AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v any) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil +// AsSlice converts an array into a slice with the same elements. +func AsSlice[T sliceElem](v any) []T { + // Mirror the small fixed-array fast path used by SliceValue. + switch a := v.(type) { + case [0]T: + return []T{} + case [1]T: + return []T{a[0]} + case [2]T: + return []T{a[0], a[1]} + case [3]T: + return []T{a[0], a[1], a[2]} } - cpy := make([]bool, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v any) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]int64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy + return asSliceReflect[T](v) } -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
-func AsFloat64Slice(v any) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]float64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy +func sliceValueReflect[T sliceElem](v []T) any { + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[T]())).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } -// AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v any) []string { +func asSliceReflect[T sliceElem](v any) []T { rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { + if !rv.IsValid() || rv.Kind() != reflect.Array || rv.Type().Elem() != reflect.TypeFor[T]() { return nil } - cpy := make([]string, rv.Len()) + cpy := make([]T, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 8c6928ca7..0cc368018 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -15,7 +15,7 @@ type KeyValue struct { // Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID + return kv.Key.Defined() } // Bool creates a KeyValue with a BOOL Value type. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index 24f1fa37d..6c04448d6 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -8,7 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} - _ = x[INVALID-0] + _ = x[EMPTY-0] _ = x[BOOL-1] _ = x[INT64-2] _ = x[FLOAT64-3] @@ -19,9 +19,9 @@ func _() { _ = x[STRINGSLICE-8] } -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" +const _Type_name = "EMPTYBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} +var _Type_index = [...]uint8{0, 5, 9, 14, 21, 27, 36, 46, 58, 69} func (i Type) String() string { idx := int(i) - 0 diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 5931e7129..db04b1326 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -6,7 +6,6 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" - "reflect" "strconv" attribute "go.opentelemetry.io/otel/attribute/internal" @@ -18,6 +17,8 @@ import ( type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. +// +// Note that the zero value is a valid empty value. type Value struct { vtype Type numeric uint64 @@ -26,8 +27,8 @@ type Value struct { } const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota + // EMPTY is used for a Value with no value set. + EMPTY Type = iota // BOOL is a boolean Type Value. BOOL // INT64 is a 64-bit signed integral Type Value. @@ -44,6 +45,10 @@ const ( FLOAT64SLICE // STRINGSLICE is a slice of strings Type Value. STRINGSLICE + // INVALID is used for a Value with no value set. + // + // Deprecated: Use EMPTY instead as an empty value is a valid value. + INVALID = EMPTY ) // BoolValue creates a BOOL Value. @@ -56,7 +61,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. 
func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} + return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} } // IntValue creates an INT64 Value. @@ -64,16 +69,30 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } -// IntSliceValue creates an INTSLICE Value. +// IntSliceValue creates an INT64SLICE Value. func IntSliceValue(v []int) Value { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), + val := Value{vtype: INT64SLICE} + + // Avoid the common tiny-slice cases from allocating a new slice. + switch len(v) { + case 0: + val.slice = [0]int64{} + case 1: + val.slice = [1]int64{int64(v[0])} + case 2: + val.slice = [2]int64{int64(v[0]), int64(v[1])} + case 3: + val.slice = [3]int64{int64(v[0]), int64(v[1]), int64(v[2])} + default: + // Fallback to a new slice for larger slices. + cp := make([]int64, len(v)) + for i, val := range v { + cp[i] = int64(val) + } + val.slice = attribute.SliceValue(cp) } + + return val } // Int64Value creates an INT64 Value. @@ -86,7 +105,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} + return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -99,7 +118,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} + return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} } // StringValue creates a STRING Value. @@ -112,7 +131,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. 
func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} + return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} } // Type returns a type of the Value. @@ -136,7 +155,7 @@ func (v Value) AsBoolSlice() []bool { } func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) + return attribute.AsSlice[bool](v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -155,7 +174,7 @@ func (v Value) AsInt64Slice() []int64 { } func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) + return attribute.AsSlice[int64](v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -174,7 +193,7 @@ func (v Value) AsFloat64Slice() []float64 { } func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) + return attribute.AsSlice[float64](v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -193,7 +212,7 @@ func (v Value) AsStringSlice() []string { } func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) + return attribute.AsSlice[string](v.slice) } type unknownValueType struct{} @@ -217,6 +236,8 @@ func (v Value) AsInterface() any { return v.stringly case STRINGSLICE: return v.asStringSlice() + case EMPTY: + return nil } return unknownValueType{} } @@ -252,6 +273,8 @@ func (v Value) Emit() string { return string(j) case STRING: return v.stringly + case EMPTY: + return "" default: return "unknown" } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c4093e49a..878ffbe43 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -14,8 +14,7 @@ import ( ) const ( - maxMembers = 180 - maxBytesPerMembers = 4096 + maxMembers = 64 maxBytesPerBaggageString = 8192 listDelimiter = "," @@ -29,7 +28,6 @@ var ( 
errInvalidProperty = errors.New("invalid baggage list-member property") errInvalidMember = errors.New("invalid baggage list-member") errMemberNumber = errors.New("too many list-members in baggage-string") - errMemberBytes = errors.New("list-member too large") errBaggageBytes = errors.New("baggage-string too large") ) @@ -309,10 +307,6 @@ func newInvalidMember() Member { // an error if the input is invalid according to the W3C Baggage // specification. func parseMember(member string) (Member, error) { - if n := len(member); n > maxBytesPerMembers { - return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) - } - var props properties keyValue, properties, found := strings.Cut(member, propertyDelimiter) if found { @@ -430,6 +424,10 @@ type Baggage struct { //nolint:golint // New returns a new valid Baggage. It returns an error if it results in a // Baggage exceeding limits set in that specification. // +// If the resulting Baggage exceeds the maximum allowed members or bytes, +// members are dropped until the limits are satisfied and an error is returned +// along with the partial result. +// // It expects all the provided members to have already been validated. func New(members ...Member) (Baggage, error) { if len(members) == 0 { @@ -441,7 +439,6 @@ func New(members ...Member) (Baggage, error) { if !m.hasData { return Baggage{}, errInvalidMember } - // OpenTelemetry resolves duplicates by last-one-wins. b[m.key] = baggage.Item{ Value: m.value, @@ -449,17 +446,42 @@ func New(members ...Member) (Baggage, error) { } } - // Check member numbers after deduplication. + var truncateErr error + + // Check member count after deduplication. 
if len(b) > maxMembers { - return Baggage{}, errMemberNumber + truncateErr = errors.Join(truncateErr, errMemberNumber) + for k := range b { + if len(b) <= maxMembers { + break + } + delete(b, k) + } } - bag := Baggage{b} - if n := len(bag.String()); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + // Check byte size and drop members if necessary. + totalBytes := 0 + first := true + for k := range b { + m := Member{ + key: k, + value: b[k].Value, + properties: fromInternalProperties(b[k].Properties), + } + memberSize := len(m.String()) + if !first { + memberSize++ // comma separator + } + if totalBytes+memberSize > maxBytesPerBaggageString { + truncateErr = errors.Join(truncateErr, fmt.Errorf("%w: %d", errBaggageBytes, totalBytes+memberSize)) + delete(b, k) + continue + } + totalBytes += memberSize + first = false } - return bag, nil + return Baggage{b}, truncateErr } // Parse attempts to decode a baggage-string from the passed string. It @@ -470,36 +492,71 @@ func New(members ...Member) (Baggage, error) { // defined (reading left-to-right) will be the only one kept. This diverges // from the W3C Baggage specification which allows duplicate list-members, but // conforms to the OpenTelemetry Baggage specification. +// +// If the baggage-string exceeds the maximum allowed members (64) or bytes +// (8192), members are dropped until the limits are satisfied and an error is +// returned along with the partial result. +// +// Invalid members are skipped and the error is returned along with the +// partial result containing the valid members. 
func Parse(bStr string) (Baggage, error) { if bStr == "" { return Baggage{}, nil } - if n := len(bStr); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - b := make(baggage.List) + sizes := make(map[string]int) // Track per-key byte sizes + var totalBytes int + var truncateErr error for memberStr := range strings.SplitSeq(bStr, listDelimiter) { + // Check member count limit. + if len(b) >= maxMembers { + truncateErr = errors.Join(truncateErr, errMemberNumber) + break + } + m, err := parseMember(memberStr) if err != nil { - return Baggage{}, err + truncateErr = errors.Join(truncateErr, err) + continue // skip invalid member, keep processing } + + // Check byte size limit. + // Account for comma separator between members. + memberBytes := len(m.String()) + _, existingKey := b[m.key] + if !existingKey && len(b) > 0 { + memberBytes++ // comma separator only for new keys + } + + // Calculate new totalBytes if we add/overwrite this key + var newTotalBytes int + if oldSize, exists := sizes[m.key]; exists { + // Overwriting existing key: subtract old size, add new size + newTotalBytes = totalBytes - oldSize + memberBytes + } else { + // New key + newTotalBytes = totalBytes + memberBytes + } + + if newTotalBytes > maxBytesPerBaggageString { + truncateErr = errors.Join(truncateErr, errBaggageBytes) + break + } + // OpenTelemetry resolves duplicates by last-one-wins. b[m.key] = baggage.Item{ Value: m.value, Properties: m.properties.asInternal(), } + sizes[m.key] = memberBytes + totalBytes = newTotalBytes } - // OpenTelemetry does not allow for duplicate list-members, but the W3C - // specification does. Now that we have deduplicated, ensure the baggage - // does not exceed list-member limits. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber + if len(b) == 0 { + return Baggage{}, truncateErr } - - return Baggage{b}, nil + return Baggage{b}, truncateErr } // Member returns the baggage list-member identified by key. 
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 676e79116..7a9b3c055 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.20.0@sha256:fa4f1c6954ecea78ab1a4e865bd6f5b4aaba80c1896f9f4a11e2c361d04e197e AS weaver +FROM otel/weaver:v0.22.1@sha256:33ae522ae4b71c1c562563c1d81f46aa0f79f088a0873199143a1f11ac30e5c9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go index 6a80ec128..320d803e9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go @@ -218,7 +218,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context if c.exportTimeout > 0 { ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { - ctx, cancel = context.WithCancel(parent) + ctx, cancel = context.WithCancel(parent) //nolint:gosec // cancel is handled by caller. 
} if c.metadata.Len() > 0 { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go index 9eff0ba61..0a171fa97 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go @@ -21,8 +21,8 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -161,12 +161,12 @@ func NewInstrumentation(id int64, target string) (*Instrumentation, error) { // ExportLogs method returns. func (i *Instrumentation) ExportLogs(ctx context.Context, count int64) ExportOp { start := time.Now() - addOpt := get[metric.AddOption](addOpPool) - defer put(addOpPool, addOpt) - - *addOpt = append(*addOpt, i.addOpt) - - i.logInflightMetric.Add(ctx, count, *addOpt...) + if i.logInflightMetric.Enabled(ctx) { + addOpt := get[metric.AddOption](addOpPool) + defer put(addOpPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.logInflightMetric.Add(ctx, count, *addOpt...) + } return ExportOp{ nLogs: count, @@ -198,11 +198,14 @@ func (e ExportOp) End(err error) { defer put(addOpPool, addOpt) *addOpt = append(*addOpt, e.inst.addOpt) - e.inst.logInflightMetric.Add(e.ctx, -e.nLogs, *addOpt...) + if e.inst.logInflightMetric.Enabled(e.ctx) { + e.inst.logInflightMetric.Add(e.ctx, -e.nLogs, *addOpt...) + } success := successful(e.nLogs, err) - e.inst.logExportedMetric.Add(e.ctx, success, *addOpt...) 
- - if err != nil { + if e.inst.logExportedMetric.Enabled(e.ctx) { + e.inst.logExportedMetric.Add(e.ctx, success, *addOpt...) + } + if err != nil && e.inst.logExportedMetric.Enabled(e.ctx) { // Add the error.type attribute to the attribute set. attrs := get[attribute.KeyValue](attrsPool) defer put(attrsPool, attrs) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go index 7bb3967f7..81c05e6e8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go @@ -201,6 +201,7 @@ func AttrValue(v attribute.Value) *cpb.AnyValue { Values: stringSliceValues(v.AsStringSlice()), }, } + case attribute.EMPTY: default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", @@ -327,6 +328,7 @@ func LogAttrValue(v api.Value) *cpb.AnyValue { Values: LogAttrs(v.AsMap()), }, } + case api.KindEmpty: default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go index 18e751907..96b9022bd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go @@ -5,4 +5,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlp // Version is the current release version of the OpenTelemetry otlploggrpc // exporter in use. 
-const Version = "0.16.0" +const Version = "0.19.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go index 46ef11369..8272d1a54 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go @@ -45,6 +45,15 @@ func newNoopClient() *client { var exporterN atomic.Int64 +var errInsecureEndpointWithTLS = errors.New("insecure HTTP endpoint cannot use TLS client configuration") + +// maxResponseBodySize is the maximum number of bytes to read from a response +// body. It is set to 4 MiB per the OTLP specification recommendation to +// mitigate excessive memory usage caused by a misconfigured or malicious +// server. If exceeded, the response is treated as a not-retryable error. +// This is a variable to allow tests to override it. +var maxResponseBodySize int64 = 4 * 1024 * 1024 + // nextExporterID returns the next unique ID for an exporter. func nextExporterID() int64 { const inc = 1 @@ -52,7 +61,11 @@ func nextExporterID() int64 { } // newHTTPClient creates a new HTTP log client. -func newHTTPClient(cfg config) (*client, error) { +func newHTTPClient(ctx context.Context, cfg config) (*client, error) { + if cfg.insecure.Value && cfg.tlsCfg.Value != nil { + return nil, errInsecureEndpointWithTLS + } + hc := cfg.httpClient if hc == nil { hc = &http.Client{ @@ -82,7 +95,7 @@ func newHTTPClient(cfg config) (*client, error) { u.Scheme = "http" } // Body is set when this is cloned during upload. 
- req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), http.NoBody) if err != nil { return nil, err } @@ -165,6 +178,7 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) } request.reset(iCtx) + // nolint:gosec // URL is constructed from validated OTLP endpoint configuration resp, err := c.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { @@ -187,7 +201,11 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) // Read the partial success message, if any. var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } if respData.Len() == 0 { @@ -218,7 +236,11 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) // message to be returned. It will help in // debugging the actual issue. var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } respStr := strings.TrimSpace(respData.String()) @@ -256,6 +278,7 @@ func (c *httpClient) newRequest(ctx context.Context, body []byte) (request, erro case NoCompression: r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) + req.GetBody = bodyReaderErr(body) case GzipCompression: // Ensure the content length is not used. 
r.ContentLength = -1 @@ -276,6 +299,7 @@ func (c *httpClient) newRequest(ctx context.Context, body []byte) (request, erro } req.bodyReader = bodyReader(b.Bytes()) + req.GetBody = bodyReaderErr(body) } return req, nil @@ -288,6 +312,13 @@ func bodyReader(buf []byte) func() io.ReadCloser { } } +// bodyReaderErr returns a closure returning a new reader for buf. +func bodyReaderErr(buf []byte) func() (io.ReadCloser, error) { + return func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(buf)), nil + } +} + // request wraps an http.Request with a resettable body reader. type request struct { *http.Request diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go index 2607e3b9b..4c8b94d69 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go @@ -24,6 +24,11 @@ The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_LOGS_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's HTTP connection. +OTEL_EXPORTER_OTLP_LOGS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure] and [WithTLSClientConfig] options. + OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. 
The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go index 4436d0cd8..eca9a7942 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go @@ -26,9 +26,9 @@ var _ log.Exporter = (*Exporter)(nil) // // It is recommended to use it with a [BatchProcessor] // or other processor exporting records asynchronously. -func New(_ context.Context, options ...Option) (*Exporter, error) { +func New(ctx context.Context, options ...Option) (*Exporter, error) { cfg := newConfig(options) - c, err := newHTTPClient(cfg) + c, err := newHTTPClient(ctx, cfg) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/observ/instrumentation.go index 1420d2490..96ec46588 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/observ/instrumentation.go @@ -21,8 +21,8 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/x" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -184,16 +184,18 @@ func ServerAddrAttrs(target string) []attribute.KeyValue { func (i *Instrumentation) ExportLogs(ctx context.Context, count int64) ExportOp { start := time.Now() - addOpt := 
get[metric.AddOption](addOptPool) - defer put(addOptPool, addOpt) - *addOpt = append(*addOpt, i.addOpt) - i.inflightMetric.Add(ctx, count, *addOpt...) + if i.inflightMetric.Enabled(ctx) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.inflightMetric.Add(ctx, count, *addOpt...) + } return ExportOp{ ctx: ctx, - start: start, inst: i, count: count, + start: start, } } @@ -214,15 +216,30 @@ type ExportOp struct { // of successfully exported logs will be determined by inspecting the // RejectedItems field of the PartialSuccess. func (e ExportOp) End(err error, code int) { - addOpt := get[metric.AddOption](addOptPool) - defer put(addOptPool, addOpt) - *addOpt = append(*addOpt, e.inst.addOpt) + inflightEnabled := e.inst.inflightMetric.Enabled(e.ctx) + exportedEnabled := e.inst.exportedMetric.Enabled(e.ctx) + durationEnabled := e.inst.operationDuration.Enabled(e.ctx) - e.inst.inflightMetric.Add(e.ctx, -e.count, *addOpt...) - success := successful(e.count, err) - e.inst.exportedMetric.Add(e.ctx, success, *addOpt...) + if !inflightEnabled && !exportedEnabled && !durationEnabled { + return + } - if err != nil { + var success int64 + if inflightEnabled || exportedEnabled { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, e.inst.addOpt) + + if inflightEnabled { + e.inst.inflightMetric.Add(e.ctx, -e.count, *addOpt...) + } + if exportedEnabled { + success = successful(e.count, err) + e.inst.exportedMetric.Add(e.ctx, success, *addOpt...) 
+ } + } + + if err != nil && exportedEnabled { attrs := get[attribute.KeyValue](attrsPool) defer put(attrsPool, attrs) @@ -233,12 +250,13 @@ func (e ExportOp) End(err error, code int) { e.inst.exportedMetric.Add(e.ctx, e.count-success, a) } - record := get[metric.RecordOption](recordPool) - defer put(recordPool, record) - *record = append(*record, e.recordOption(err, code)) - - duration := time.Since(e.start).Seconds() - e.inst.operationDuration.Record(e.ctx, duration, *record...) + if durationEnabled { + record := get[metric.RecordOption](recordPool) + defer put(recordPool, record) + *record = append(*record, e.recordOption(err, code)) + duration := time.Since(e.start).Seconds() + e.inst.operationDuration.Record(e.ctx, duration, *record...) + } } func (e ExportOp) recordOption(err error, code int) metric.RecordOption { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go index c3d9710c2..b5a0f5b86 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go @@ -201,6 +201,7 @@ func AttrValue(v attribute.Value) *cpb.AnyValue { Values: stringSliceValues(v.AsStringSlice()), }, } + case attribute.EMPTY: default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", @@ -327,6 +328,7 @@ func LogAttrValue(v api.Value) *cpb.AnyValue { Values: LogAttrs(v.AsMap()), }, } + case api.KindEmpty: default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/version.go index 7563ba67e..380fc10d2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/version.go +++ 
b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/version.go @@ -4,4 +4,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal" // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. -const Version = "0.16.0" +const Version = "0.19.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 756bf7964..7b7f90729 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -151,7 +151,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context if c.exportTimeout > 0 { ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { - ctx, cancel = context.WithCancel(parent) + ctx, cancel = context.WithCancel(parent) //nolint:gosec // cancel is handled by the caller. 
} if c.metadata.Len() > 0 { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go index cb70a9c41..3e616a927 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go @@ -87,6 +87,7 @@ func Value(v attribute.Value) *cpb.AnyValue { Values: stringSliceValues(v.AsStringSlice()), }, } + case attribute.EMPTY: default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index e4ec772af..0606277b3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. 
func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 76381b56e..1958f9d1b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -52,8 +52,21 @@ var ourTransport = &http.Transport{ ExpectContinueTimeout: 1 * time.Second, } +var errInsecureEndpointWithTLS = errors.New("insecure HTTP endpoint cannot use TLS client configuration") + +// maxResponseBodySize is the maximum number of bytes to read from a response +// body. It is set to 4 MiB per the OTLP specification recommendation to +// mitigate excessive memory usage caused by a misconfigured or malicious +// server. If exceeded, the response is treated as a not-retryable error. +// This is a variable to allow tests to override it. +var maxResponseBodySize int64 = 4 * 1024 * 1024 + // newClient creates a new HTTP metric client. func newClient(cfg oconf.Config) (*client, error) { + if cfg.Metrics.Insecure && cfg.Metrics.TLSCfg != nil { + return nil, errInsecureEndpointWithTLS + } + httpClient := cfg.Metrics.HTTPClient if httpClient == nil { httpClient = &http.Client{ @@ -83,7 +96,7 @@ func newClient(cfg oconf.Config) (*client, error) { u.Scheme = "http" } // Body is set when this is cloned during upload. 
- req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, u.String(), http.NoBody) if err != nil { return nil, err } @@ -146,6 +159,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou } request.reset(iCtx) + // nolint:gosec // URL is constructed from validated OTLP endpoint configuration resp, err := c.httpClient.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { @@ -167,7 +181,11 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou // Read the partial success message, if any. var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } if respData.Len() == 0 { @@ -198,7 +216,11 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou // message to be returned. It will help in // debugging the actual issue. var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } respStr := strings.TrimSpace(respData.String()) @@ -236,6 +258,7 @@ func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { case NoCompression: r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) + req.GetBody = bodyReaderErr(body) case GzipCompression: // Ensure the content length is not used. 
r.ContentLength = -1 @@ -256,6 +279,7 @@ func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { } req.bodyReader = bodyReader(b.Bytes()) + req.GetBody = bodyReaderErr(body) } return req, nil @@ -268,6 +292,13 @@ func bodyReader(buf []byte) func() io.ReadCloser { } } +// bodyReaderErr returns a closure returning a new reader for buf. +func bodyReaderErr(buf []byte) func() (io.ReadCloser, error) { + return func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(buf)), nil + } +} + // request wraps an http.Request with a resettable body reader. type request struct { *http.Request diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go index de9e71a6e..a2ea04922 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go @@ -24,6 +24,11 @@ The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's HTTP connection. +OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure] and [WithTLSClientConfig] options. + OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. 
The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go index 6c9787189..9e3d8da1e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go @@ -87,6 +87,7 @@ func Value(v attribute.Value) *cpb.AnyValue { Values: stringSliceValues(v.AsStringSlice()), }, } + case attribute.EMPTY: default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 58244df5f..da551839d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. 
func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index d9bfd6e17..12e243e04 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -93,6 +93,7 @@ func Value(v attribute.Value) *commonpb.AnyValue { Values: stringSliceValues(v.AsStringSlice()), }, } + case attribute.EMPTY: default: av.Value = &commonpb.AnyValue_StringValue{ StringValue: "INVALID", diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 76b7cd461..258d0ca6a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -62,7 +62,7 @@ func NewClient(opts ...Option) otlptrace.Client { func newClient(opts ...Option) *client { cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) //nolint:gosec // cancel called in client shutdown. c := &client{ endpoint: cfg.Traces.Endpoint, @@ -248,7 +248,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context if c.exportTimeout > 0 { ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { - ctx, cancel = context.WithCancel(parent) + ctx, cancel = context.WithCancel(parent) //nolint:gosec // cancel called by caller when export is complete. 
} if c.metadata.Len() > 0 { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go index d4a69f4d7..a84733174 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go @@ -18,8 +18,8 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -208,10 +208,12 @@ func BaseAttrs(id int64, target string) []attribute.KeyValue { func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp { start := time.Now() - addOpt := get[metric.AddOption](addOptPool) - defer put(addOptPool, addOpt) - *addOpt = append(*addOpt, i.addOpt) - i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + if i.inflightSpans.Enabled(ctx) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + } return ExportOp{ ctx: ctx, @@ -244,14 +246,18 @@ func (e ExportOp) End(err error, code codes.Code) { defer put(addOptPool, addOpt) *addOpt = append(*addOpt, e.inst.addOpt) - e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + if e.inst.inflightSpans.Enabled(e.ctx) { + e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + } success := successful(e.nSpans, err) // Record successfully exported spans, even if the value is 0 which are // meaningful to distribution aggregations. 
- e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + if e.inst.exportedSpans.Enabled(e.ctx) { + e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + } - if err != nil { + if err != nil && e.inst.exportedSpans.Enabled(e.ctx) { attrs := get[attribute.KeyValue](measureAttrsPool) defer put(measureAttrsPool, attrs) *attrs = append(*attrs, e.inst.attrs...) @@ -266,12 +272,14 @@ func (e ExportOp) End(err error, code codes.Code) { e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...) } - recOpt := get[metric.RecordOption](recordOptPool) - defer put(recordOptPool, recOpt) - *recOpt = append(*recOpt, e.inst.recordOption(err, code)) + if e.inst.opDuration.Enabled(e.ctx) { + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err, code)) - d := time.Since(e.start).Seconds() - e.inst.opDuration.Record(e.ctx, d, *recOpt...) + d := time.Since(e.start).Seconds() + e.inst.opDuration.Record(e.ctx, d, *recOpt...) + } } // recordOption returns a RecordOption with attributes representing the diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go index 19fac1b72..7a1c420ec 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go @@ -5,4 +5,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot // Version is the current release version of the OpenTelemetry OTLP gRPC trace // exporter in use. 
-const Version = "1.40.0" +const Version = "1.43.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 5af309fea..4ae569ff4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -32,6 +32,13 @@ import ( const contentTypeProto = "application/x-protobuf" +// maxResponseBodySize is the maximum number of bytes to read from a response +// body. It is set to 4 MiB per the OTLP specification recommendation to +// mitigate excessive memory usage caused by a misconfigured or malicious +// server. If exceeded, the response is treated as a not-retryable error. +// This is a variable to allow tests to override it. +var maxResponseBodySize int64 = 4 * 1024 * 1024 + var gzPool = sync.Pool{ New: func() any { w := gzip.NewWriter(io.Discard) @@ -56,6 +63,8 @@ var ourTransport = &http.Transport{ ExpectContinueTimeout: 1 * time.Second, } +var errInsecureEndpointWithTLS = errors.New("insecure HTTP endpoint cannot use TLS client configuration") + type client struct { name string cfg otlpconfig.SignalConfig @@ -110,6 +119,10 @@ func NewClient(opts ...Option) otlptrace.Client { // Start does nothing in a HTTP client. func (c *client) Start(ctx context.Context) error { + if c.cfg.Insecure && c.cfg.TLSCfg != nil { + return errInsecureEndpointWithTLS + } + // Initialize the instrumentation if not already done. 
// // Initialize here instead of NewClient to allow any errors to be passed @@ -174,6 +187,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc } request.reset(ctx) + // nolint:gosec // URL is constructed from validated OTLP endpoint configuration resp, err := d.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { @@ -196,7 +210,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } if respData.Len() == 0 { @@ -227,7 +245,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc // message to be returned. It will help in // debugging the actual issue. 
var respData bytes.Buffer - if _, err := io.Copy(&respData, resp.Body); err != nil { + if _, err := io.Copy(&respData, http.MaxBytesReader(nil, resp.Body, maxResponseBodySize)); err != nil { + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + return fmt.Errorf("response body too large: exceeded %d bytes", maxBytesErr.Limit) + } return err } respStr := strings.TrimSpace(respData.String()) @@ -252,7 +274,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc func (d *client) newRequest(body []byte) (request, error) { u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} - r, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) + r, err := http.NewRequestWithContext(context.Background(), http.MethodPost, u.String(), http.NoBody) if err != nil { return request{Request: r}, err } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go index 9fea75ad1..85645e118 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go @@ -24,6 +24,11 @@ The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's HTTP connection. +OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure] and [WithTLSClientConfig] options. + OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. 
The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go index 50e53c6a7..3f2556e7a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go @@ -23,8 +23,8 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -261,10 +261,12 @@ func parseIP(ip string) string { func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp { start := time.Now() - addOpt := get[metric.AddOption](addOptPool) - defer put(addOptPool, addOpt) - *addOpt = append(*addOpt, i.addOpt) - i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + if i.inflightSpans.Enabled(ctx) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + } return ExportOp{ ctx: ctx, @@ -299,14 +301,18 @@ func (e ExportOp) End(err error, status int) { defer put(addOptPool, addOpt) *addOpt = append(*addOpt, e.inst.addOpt) - e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + if e.inst.inflightSpans.Enabled(e.ctx) { + e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) 
+ } success := successful(e.nSpans, err) // Record successfully exported spans, even if the value is 0 which are // meaningful to distribution aggregations. - e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + if e.inst.exportedSpans.Enabled(e.ctx) { + e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + } - if err != nil { + if err != nil && e.inst.exportedSpans.Enabled(e.ctx) { attrs := get[attribute.KeyValue](measureAttrsPool) defer put(measureAttrsPool, attrs) *attrs = append(*attrs, e.inst.attrs...) @@ -321,12 +327,14 @@ func (e ExportOp) End(err error, status int) { e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...) } - recOpt := get[metric.RecordOption](recordOptPool) - defer put(recordOptPool, recOpt) - *recOpt = append(*recOpt, e.inst.recordOption(err, status)) + if e.inst.opDuration.Enabled(e.ctx) { + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err, status)) - d := time.Since(e.start).Seconds() - e.inst.opDuration.Record(e.ctx, d, *recOpt...) + d := time.Since(e.start).Seconds() + e.inst.opDuration.Record(e.ctx, d, *recOpt...) + } } // recordOption returns a RecordOption with attributes representing the diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go index 17dbde44f..3e43f7711 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go @@ -5,4 +5,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot // Version is the current release version of the OpenTelemetry OTLP HTTP trace // exporter in use. 
-const Version = "1.40.0" +const Version = "1.43.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 370cd2a6f..087e95f7b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go index 5097cae6f..bc6f33fe6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go @@ -17,8 +17,8 @@ import ( "go.opentelemetry.io/otel/exporters/prometheus/internal" "go.opentelemetry.io/otel/exporters/prometheus/internal/x" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -183,6 +183,9 @@ type Timer struct { // // If err is non-nil, an appropriate error type attribute will be included. func (t Timer) Stop(err error) { + if !t.hist.Enabled(t.ctx) { + return + } recordOpt := get[metric.RecordOption](recordOptPool) defer put(recordOptPool, recordOpt) *recordOpt = append(*recordOpt, t.inst.setOpt) @@ -196,7 +199,6 @@ func (t Timer) Stop(err error) { set := attribute.NewSet(*attrs...) 
*recordOpt = append((*recordOpt)[:0], metric.WithAttributeSet(set)) } - t.hist.Record(t.ctx, time.Since(t.start).Seconds(), *recordOpt...) } @@ -205,11 +207,12 @@ func (t Timer) Stop(err error) { // It returns an [ExportOp] that tracks the export operation. The // [ExportOp.End] method must be called when the export completes. func (i *Instrumentation) ExportMetrics(ctx context.Context, n int64) ExportOp { - addOpt := get[metric.AddOption](addOptPool) - defer put(addOptPool, addOpt) - *addOpt = append(*addOpt, i.setOpt) - - i.inflightMetric.Add(ctx, n, *addOpt...) + if i.inflightMetric.Enabled(ctx) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.setOpt) + i.inflightMetric.Add(ctx, n, *addOpt...) + } return ExportOp{ctx: ctx, nMetrics: n, inst: i} } @@ -232,10 +235,14 @@ func (e ExportOp) End(success int64, err error) { defer put(addOptPool, addOpt) *addOpt = append(*addOpt, e.inst.setOpt) - e.inst.inflightMetric.Add(e.ctx, -e.nMetrics, *addOpt...) - e.inst.exportedMetric.Add(e.ctx, success, *addOpt...) + if e.inst.inflightMetric.Enabled(e.ctx) { + e.inst.inflightMetric.Add(e.ctx, -e.nMetrics, *addOpt...) + } + if e.inst.exportedMetric.Enabled(e.ctx) { + e.inst.exportedMetric.Add(e.ctx, success, *addOpt...) + } - if err != nil { + if err != nil && e.inst.exportedMetric.Enabled(e.ctx) { attrs := get[attribute.KeyValue](measureAttrsPool) defer put(measureAttrsPool, attrs) *attrs = append(*attrs, e.inst.attrs...) 
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go index a8508e558..a3527e265 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go @@ -6,4 +6,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/prometheus/intern // Version is the current release version of the OpenTelemetry prometheus // exporter in use. -const Version = "0.62.0" +const Version = "0.65.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/observ/instrumentation.go index 22ee1dc72..c2b2f6772 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/observ/instrumentation.go @@ -17,8 +17,8 @@ import ( "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/x" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -138,7 +138,11 @@ func NewInstrumentation(id int64) (*Instrumentation, error) { // ExportOp that needs to be ended with End() when the export operation completes. func (i *Instrumentation) ExportMetrics(ctx context.Context, count int64) ExportOp { start := time.Now() - i.inflight.Add(ctx, count, i.addOpts...) + + // Avoid work when observability is disabled. + if i.inflight.Enabled(ctx) { + i.inflight.Add(ctx, count, i.addOpts...) 
+ } return ExportOp{ ctx: ctx, start: start, @@ -160,30 +164,57 @@ type ExportOp struct { // The err parameter indicates whether the operation failed. If err is not nil, // it is added to metrics as attribute. func (e ExportOp) End(err error) { - durationSeconds := time.Since(e.start).Seconds() - e.inst.inflight.Add(e.ctx, -e.nTraces, e.inst.addOpts...) + inflightEnabled := e.inst.inflight.Enabled(e.ctx) + exportedEnabled := e.inst.exported.Enabled(e.ctx) + durationEnabled := e.inst.duration.Enabled(e.ctx) + + if !inflightEnabled && !exportedEnabled && !durationEnabled { + return + } + + var durationSeconds float64 + if durationEnabled { + durationSeconds = time.Since(e.start).Seconds() + } + if inflightEnabled { + e.inst.inflight.Add(e.ctx, -e.nTraces, e.inst.addOpts...) + } if err == nil { // short circuit in case of success to avoid allocations - e.inst.exported.Inst().Add(e.ctx, e.nTraces, e.inst.addOpts...) - e.inst.duration.Inst().Record(e.ctx, durationSeconds, e.inst.recordOpts...) + if exportedEnabled { + e.inst.exported.Inst().Add(e.ctx, e.nTraces, e.inst.addOpts...) + } + if durationEnabled { + e.inst.duration.Inst().Record(e.ctx, durationSeconds, e.inst.recordOpts...) + } + return + } + + if !exportedEnabled && !durationEnabled { return } attrs := get[attribute.KeyValue](measureAttrsPool) - addOpts := get[metric.AddOption](addOptsPool) - recordOpts := get[metric.RecordOption](recordOptsPool) defer func() { put(measureAttrsPool, attrs) - put(addOptsPool, addOpts) - put(recordOptsPool, recordOpts) }() *attrs = append(*attrs, e.inst.attrs...) *attrs = append(*attrs, semconv.ErrorType(err)) set := attribute.NewSet(*attrs...) attrOpt := metric.WithAttributeSet(set) - *addOpts = append(*addOpts, attrOpt) - *recordOpts = append(*recordOpts, attrOpt) - e.inst.exported.Inst().Add(e.ctx, e.nTraces, *addOpts...) - e.inst.duration.Inst().Record(e.ctx, durationSeconds, *recordOpts...) 
+ if exportedEnabled { + addOpts := get[metric.AddOption](addOptsPool) + defer put(addOptsPool, addOpts) + + *addOpts = append(*addOpts, attrOpt) + e.inst.exported.Inst().Add(e.ctx, e.nTraces, *addOpts...) + } + if durationEnabled { + recordOpts := get[metric.RecordOption](recordOptsPool) + defer put(recordOptsPool, recordOpts) + + *recordOpts = append(*recordOpts, attrOpt) + e.inst.duration.Inst().Record(e.ctx, durationSeconds, *recordOpts...) + } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/version.go index d7be63278..d7a82f6fd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/version.go @@ -5,4 +5,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetr // Version is the current release version of the OpenTelemetry stdoutmetric // exporter in use. 
-const Version = "1.40.0" +const Version = "1.43.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go index cc835fc8a..901cc938c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go @@ -17,8 +17,8 @@ import ( "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -150,10 +150,12 @@ func NewInstrumentation(id int64) (*Instrumentation, error) { func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp { start := time.Now() - addOpt := get[metric.AddOption](addOptPool) - defer put(addOptPool, addOpt) - *addOpt = append(*addOpt, i.setOpt) - i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + if i.inflightSpans.Enabled(ctx) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.setOpt) + i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + } return ExportOp{ ctx: ctx, @@ -177,20 +179,32 @@ type ExportOp struct { // The err parameter indicates whether the operation failed. If err is not nil, // the number of failed spans (nSpans - success) is also recorded. 
func (e ExportOp) End(success int64, err error) { + inflightSpansEnable := e.inst.inflightSpans.Enabled(e.ctx) + exportedSpansEnable := e.inst.exportedSpans.Enabled(e.ctx) + opDurationEnable := e.inst.opDuration.Enabled(e.ctx) + + if !inflightSpansEnable && !exportedSpansEnable && !opDurationEnable { + return + } + addOpt := get[metric.AddOption](addOptPool) defer put(addOptPool, addOpt) *addOpt = append(*addOpt, e.inst.setOpt) - e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + if inflightSpansEnable { + e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + } // Record the success and duration of the operation. // // Do not exclude 0 values, as they are valid and indicate no spans // were exported which is meaningful for certain aggregations. - e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + if exportedSpansEnable { + e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + } mOpt := e.inst.setOpt - if err != nil { + if err != nil && exportedSpansEnable { attrs := get[attribute.KeyValue](measureAttrsPool) defer put(measureAttrsPool, attrs) *attrs = append(*attrs, e.inst.attrs...) @@ -207,8 +221,10 @@ func (e ExportOp) End(success int64, err error) { e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...) } - recordOpt := get[metric.RecordOption](recordOptPool) - defer put(recordOptPool, recordOpt) - *recordOpt = append(*recordOpt, mOpt) - e.inst.opDuration.Record(e.ctx, time.Since(e.start).Seconds(), *recordOpt...) + if opDurationEnable { + recordOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recordOpt) + *recordOpt = append(*recordOpt, mOpt) + e.inst.opDuration.Record(e.ctx, time.Since(e.start).Seconds(), *recordOpt...) 
+ } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go index 087d5c393..78e618c47 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go @@ -5,4 +5,4 @@ package internal // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrac // Version is the current release version of the OpenTelemetry stdouttrace // exporter in use. -const Version = "1.40.0" +const Version = "1.43.0" diff --git a/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go b/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go new file mode 100644 index 000000000..3f0ab3131 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package errorhandler provides the global error handler for OpenTelemetry. +// +// This package has no OTel dependencies, allowing it to be imported by any +// package in the module without creating import cycles. +package errorhandler // import "go.opentelemetry.io/otel/internal/errorhandler" + +import ( + "errors" + "log" + "sync" + "sync/atomic" +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) + +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) +} + +// setDelegate sets the ErrorHandler delegate. 
+func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} + +type errorHandlerHolder struct { + eh ErrorHandler +} + +var ( + globalErrorHandler = defaultErrorHandler() + delegateErrorHandlerOnce sync.Once +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. 
+ log.Print(errors.New("no ErrorHandler delegate configured"), " ErrorHandler remains its current value.") + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 2e47b2964..77d0425f5 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -5,33 +5,13 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( - "log" - "sync/atomic" + "go.opentelemetry.io/otel/internal/errorhandler" ) -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) -} +// ErrorHandler is an alias for errorhandler.ErrorHandler, kept for backward +// compatibility with existing callers of internal/global. +type ErrorHandler = errorhandler.ErrorHandler -type ErrDelegator struct { - delegate atomic.Pointer[ErrorHandler] -} - -// Compile-time check that delegator implements ErrorHandler. -var _ ErrorHandler = (*ErrDelegator)(nil) - -func (d *ErrDelegator) Handle(err error) { - if eh := d.delegate.Load(); eh != nil { - (*eh).Handle(err) - return - } - log.Print(err) -} - -// setDelegate sets the ErrorHandler delegate. -func (d *ErrDelegator) setDelegate(eh ErrorHandler) { - d.delegate.Store(&eh) -} +// ErrDelegator is an alias for errorhandler.ErrDelegator, kept for backward +// compatibility with existing callers of internal/global. 
+type ErrDelegator = errorhandler.ErrDelegator diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 204ea142a..225c9e501 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -8,16 +8,13 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel/internal/errorhandler" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) type ( - errorHandlerHolder struct { - eh ErrorHandler - } - tracerProviderHolder struct { tp trace.TracerProvider } @@ -32,12 +29,10 @@ type ( ) var ( - globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() - delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once @@ -53,7 +48,7 @@ var ( // Subsequent calls to SetErrorHandler after the first will not forward errors // to the new ErrorHandler for prior returned instances. func GetErrorHandler() ErrorHandler { - return globalErrorHandler.Load().(errorHandlerHolder).eh + return errorhandler.GetErrorHandler() } // SetErrorHandler sets the global ErrorHandler to h. @@ -63,26 +58,7 @@ func GetErrorHandler() ErrorHandler { // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not // delegate errors to h. func SetErrorHandler(h ErrorHandler) { - current := GetErrorHandler() - - if _, cOk := current.(*ErrDelegator); cOk { - if _, ehOk := h.(*ErrDelegator); ehOk && current == h { - // Do not assign to the delegate of the default ErrDelegator to be - // itself. 
- Error( - errors.New("no ErrorHandler delegate configured"), - "ErrorHandler remains its current value.", - ) - return - } - } - - delegateErrorHandlerOnce.Do(func() { - if def, ok := current.(*ErrDelegator); ok { - def.setDelegate(h) - } - }) - globalErrorHandler.Store(errorHandlerHolder{eh: h}) + errorhandler.SetErrorHandler(h) } // TracerProvider is the internal implementation for global.TracerProvider. @@ -174,12 +150,6 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } -func defaultErrorHandler() *atomic.Value { - v := &atomic.Value{} - v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) - return v -} - func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go index f87cee04d..dd15ee3b8 100644 --- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go +++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go @@ -390,7 +390,7 @@ func (a KeyValue) String() string { // ValueFromAttribute converts [attribute.Value] to [Value]. func ValueFromAttribute(value attribute.Value) Value { switch value.Type() { - case attribute.INVALID: + case attribute.EMPTY: return Value{} case attribute.BOOL: return BoolValue(value.AsBool()) diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go index adde7a0dc..cf62deb9e 100644 --- a/vendor/go.opentelemetry.io/otel/log/record.go +++ b/vendor/go.opentelemetry.io/otel/log/record.go @@ -26,6 +26,7 @@ type Record struct { severity Severity severityText string body Value + err error // The fields below are for optimizing the implementation of Attributes and // AddAttributes. This design is borrowed from the slog Record type: @@ -110,6 +111,16 @@ func (r *Record) SetBody(v Value) { r.body = v } +// Err returns the associated error if one has been set. 
+func (r *Record) Err() error { + return r.err +} + +// SetErr sets the associated error. Passing nil clears the error. +func (r *Record) SetErr(err error) { + r.err = err +} + // WalkAttributes walks all attributes the log record holds by calling f for // each on each [KeyValue] in the [Record]. Iteration stops if f returns false. func (r *Record) WalkAttributes(f func(KeyValue) bool) { diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index eb4f5961f..466812d34 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -211,6 +211,9 @@ type Float64Observer interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Observe(value float64, options ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 1dfc4b0f2..66c971bd8 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -210,6 +210,9 @@ type Int64Observer interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Observe(value int64, options ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index a16c4c0a1..5606ec4bd 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -30,6 +30,9 @@ type MeterProvider interface { // // If the name is empty, then an implementation defined default name will // be used instead. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Meter(name string, opts ...MeterOption) Meter } @@ -51,6 +54,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument @@ -61,6 +67,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by @@ -71,6 +80,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) // Int64Gauge returns a new Int64Gauge instrument identified by name and @@ -80,6 +92,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified @@ -95,6 +110,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter @@ -110,6 +128,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64ObservableUpDownCounter( name string, options ...Int64ObservableUpDownCounterOption, @@ -128,6 +149,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by @@ -148,6 +172,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by @@ -158,6 +185,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) // Float64Gauge returns a new Float64Gauge instrument identified by name and @@ -167,6 +197,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter @@ -182,6 +215,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new @@ -197,6 +233,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64ObservableUpDownCounter( name string, options ...Float64ObservableUpDownCounterOption, @@ -215,6 +254,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -229,6 +271,9 @@ type Meter interface { // If no instruments are passed, f should not be registered nor called // during collection. // + // Implementations of this method need to be safe for a user to call + // concurrently. + // // The function f needs to be concurrent safe. RegisterCallback(f Callback, instruments ...Observable) (Registration, error) } @@ -263,9 +308,15 @@ type Observer interface { embedded.Observer // ObserveFloat64 records the float64 value for obsrv. + // + // Implementations of this method need to be safe for a user to call + // concurrently. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) // ObserveInt64 records the int64 value for obsrv. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } @@ -283,6 +334,7 @@ type Registration interface { // Unregister removes the callback registration from a Meter. // - // This method needs to be idempotent and concurrent safe. + // Implementations of this method need to be idempotent and safe for a user + // to call concurrently. Unregister() error } diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 57a74c5e6..abb3051d7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -24,12 +24,18 @@ type Float64Counter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr float64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -83,12 +89,18 @@ type Float64UpDownCounter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr float64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. 
+ // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -142,12 +154,18 @@ type Float64Histogram interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, incr float64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -206,12 +224,18 @@ type Float64Gauge interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, value float64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index ac2d033ea..5bbfaf039 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -24,12 +24,18 @@ type Int64Counter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. 
+ // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr int64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -83,12 +89,18 @@ type Int64UpDownCounter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr int64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -142,12 +154,18 @@ type Int64Histogram interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, incr int64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Enabled(context.Context) bool } @@ -206,12 +224,18 @@ type Int64Gauge interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, value int64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 051882602..2ecca3fed 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -7,9 +7,16 @@ import ( "context" "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/internal/errorhandler" ) -const baggageHeader = "baggage" +const ( + baggageHeader = "baggage" + + // W3C Baggage specification limits. + // https://www.w3.org/TR/baggage/#limits + maxMembers = 64 +) // Baggage is a propagator that supports the W3C Baggage format. 
// @@ -50,6 +57,9 @@ func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) contex bag, err := baggage.Parse(bStr) if err != nil { + errorhandler.GetErrorHandler().Handle(err) + } + if bag.Len() == 0 { return parent } return baggage.ContextWithBaggage(parent, bag) @@ -60,17 +70,27 @@ func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.C if len(bVals) == 0 { return parent } + var members []baggage.Member for _, bStr := range bVals { currBag, err := baggage.Parse(bStr) if err != nil { + errorhandler.GetErrorHandler().Handle(err) + } + if currBag.Len() == 0 { continue } members = append(members, currBag.Members()...) + if len(members) >= maxMembers { + break + } } b, err := baggage.New(members...) - if err != nil || b.Len() == 0 { + if err != nil { + errorhandler.GetErrorHandler().Handle(err) + } + if b.Len() == 0 { return parent } return baggage.ContextWithBaggage(parent, b) diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 271ab71f1..11f404deb 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -46,8 +46,8 @@ func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { carrier.Set(tracestateHeader, ts) } - // Clear all flags other than the trace-context supported sampling bit. - flags := sc.TraceFlags() & trace.FlagsSampled + // Preserve only the spec-defined flags: sampled (0x01) and random (0x02). 
+ flags := sc.TraceFlags() & (trace.FlagsSampled | trace.FlagsRandom) var sb strings.Builder sb.Grow(2 + 32 + 16 + 2 + 3) @@ -104,14 +104,13 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { if !extractPart(opts[:], &h, 2) { return trace.SpanContext{} } - if version == 0 && (h != "" || opts[0] > 2) { - // version 0 not allow extra - // version 0 not allow other flag + if version == 0 && (h != "" || opts[0] > 3) { + // version 0 does not allow extra fields or reserved flag bits. return trace.SpanContext{} } - // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. + scc.TraceFlags = trace.TraceFlags(opts[0]) & //nolint:gosec // slice size already checked. + (trace.FlagsSampled | trace.FlagsRandom) // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index 1bb55fb1c..7c541dee7 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.4.1 +codespell==2.4.2 diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go index bfeb73e81..694b64a31 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -37,3 +37,18 @@ var Observability = newFeature( return "", false }, ) + +// PerSeriesStartTimestamps is an experimental feature flag that determines if the SDK +// uses the new Start Timestamps specification. +// +// To enable this feature set the OTEL_GO_X_PER_SERIES_START_TIMESTAMPS environment variable +// to the case-insensitive string value of "true". 
+var PerSeriesStartTimestamps = newFeature( + []string{"PER_SERIES_START_TIMESTAMPS"}, + func(v string) (bool, bool) { + if strings.EqualFold(v, "true") { + return true, true + } + return false, false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go index c54407e67..a03248eba 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go @@ -335,7 +335,7 @@ func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int { n := min(len(buf), q.len) for i := range n { - buf[i] = q.read.Value + buf[i] = q.read.Value // nolint:gosec // n is bounded by len(buf) q.read = q.read.Next() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go index c08f00d78..56c50ae9e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go @@ -11,8 +11,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/log/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) // newRecordCounterIncr returns a function that increments the log record diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go index 376069fb4..b84509a2a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go @@ -14,8 +14,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/log/internal/x" - semconv 
"go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( @@ -104,7 +104,9 @@ func NewSLP(id int64) (*SLP, error) { // LogProcessed records that a log has been processed by the SimpleLogProcessor. // If err is non-nil, it records the processing error as an attribute. func (slp *SLP) LogProcessed(ctx context.Context, err error) { - slp.processed.Add(ctx, 1, slp.addOption(err)...) + if slp.processed.Enabled(ctx) { + slp.processed.Add(ctx, 1, slp.addOption(err)...) + } } func (slp *SLP) addOption(err error) []metric.AddOption { diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go index f43a867c8..7efb96240 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go @@ -5,17 +5,25 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" + "reflect" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace" ) var now = time.Now +const ( + exceptionTypeKey = string(semconv.ExceptionTypeKey) + exceptionMessageKey = string(semconv.ExceptionMessageKey) + exceptionStacktraceKey = string(semconv.ExceptionStacktraceKey) +) + // Compile-time check logger implements log.Logger. 
var _ log.Logger = (*logger)(nil) @@ -108,10 +116,70 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { newRecord.observedTimestamp = now() } + hasExceptionAttr := false r.WalkAttributes(func(kv log.KeyValue) bool { + switch kv.Key { + case exceptionTypeKey, exceptionMessageKey, exceptionStacktraceKey: + hasExceptionAttr = true + } newRecord.AddAttributes(kv) return true }) + if err := r.Err(); err != nil && !hasExceptionAttr { + addExceptionAttrs(&newRecord, err) + } + return newRecord } + +func addExceptionAttrs(r *Record, err error) { + var attrs [2]log.KeyValue + n := 0 + if msg := err.Error(); msg != "" { + if r.attributeCountLimit > 0 && r.attributeCountLimit-r.AttributesLen() < n+1 { + goto flush + } + attrs[n] = log.String(exceptionMessageKey, msg) + n++ + } + if errType := errorType(err); errType != "" { + if r.attributeCountLimit > 0 && r.attributeCountLimit-r.AttributesLen() < n+1 { + goto flush + } + attrs[n] = log.String(exceptionTypeKey, errType) + n++ + } + +flush: + if n > 0 { + r.addAttrs(attrs[:n]) + } +} + +func errorType(err error) string { + if et, ok := err.(interface{ ErrorType() string }); ok { + if s := et.ErrorType(); s != "" { + return s + } + } + + t := reflect.TypeOf(err) + if t == nil { + return "" + } + + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + return pkg + "." + name + } + + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. 
+ return t.String() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index c6440a134..306e5e3cd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -160,12 +160,14 @@ func WithExemplarFilter(filter exemplar.Filter) Option { }) } -// WithCardinalityLimit sets the cardinality limit for the MeterProvider. +// WithCardinalityLimit sets the global cardinality limit for the MeterProvider. // // The cardinality limit is the hard limit on the number of metric datapoints // that can be collected for a single instrument in a single collect cycle. // // Setting this to a zero or negative value means no limit is applied. +// This value applies to all instrument kinds, but can be overridden per kind by +// the reader's cardinality limit selector (see [WithCardinalityLimitSelector]). func WithCardinalityLimit(limit int) Option { // For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT` // can also be used to set this value. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index 2aeba4378..312d73c45 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -8,10 +8,12 @@ import ( "errors" "math" "sync" + "sync/atomic" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -20,11 +22,6 @@ const ( expoMinScale = -10 smallestNonZeroNormalFloat64 = 0x1p-1022 - - // These redefine the Math constants with a type, so the compiler won't coerce - // them into an int on 32 bit platforms. 
- maxInt64 int64 = math.MaxInt64 - minInt64 int64 = math.MinInt64 ) // expoHistogramDataPoint is a single data point in an exponential histogram. @@ -32,19 +29,19 @@ type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set res FilteredExemplarReservoir[N] - min N - max N - sum N + minMax atomicMinMax[N] + sum atomicCounter[N] maxSize int noMinMax bool noSum bool - scale int32 + scale atomic.Int32 posBuckets expoBuckets negBuckets expoBuckets - zeroCount uint64 + zeroCount atomic.Uint64 + startTime time.Time } func newExpoHistogramDataPoint[N int64 | float64]( @@ -53,42 +50,30 @@ func newExpoHistogramDataPoint[N int64 | float64]( maxScale int32, noMinMax, noSum bool, ) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag - f := math.MaxFloat64 - ma := N(f) // if N is int64, max will overflow to -9223372036854775808 - mi := N(-f) - if N(maxInt64) > N(f) { - ma = N(maxInt64) - mi = N(minInt64) - } - return &expoHistogramDataPoint[N]{ - attrs: attrs, - min: ma, - max: mi, - maxSize: maxSize, - noMinMax: noMinMax, - noSum: noSum, - scale: maxScale, + dp := &expoHistogramDataPoint[N]{ + attrs: attrs, + maxSize: maxSize, + noMinMax: noMinMax, + noSum: noSum, + startTime: now(), } + dp.scale.Store(maxScale) + return dp } // record adds a new measurement to the histogram. It will rescale the buckets if needed. func (p *expoHistogramDataPoint[N]) record(v N) { if !p.noMinMax { - if v < p.min { - p.min = v - } - if v > p.max { - p.max = v - } + p.minMax.Update(v) } if !p.noSum { - p.sum += v + p.sum.add(v) } absV := math.Abs(float64(v)) if float64(absV) == 0.0 { - p.zeroCount++ + p.zeroCount.Add(1) return } @@ -102,14 +87,15 @@ func (p *expoHistogramDataPoint[N]) record(v N) { // If the new bin would make the counts larger than maxScale, we need to // downscale current measurements. 
if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 { - if p.scale-scaleDelta < expoMinScale { + currentScale := p.scale.Load() + if currentScale-scaleDelta < expoMinScale { // With a scale of -10 there is only two buckets for the whole range of float64 values. // This can only happen if there is a max size of 1. otel.Handle(errors.New("exponential histogram scale underflow")) return } // Downscale - p.scale -= scaleDelta + p.scale.Add(-scaleDelta) p.posBuckets.downscale(scaleDelta) p.negBuckets.downscale(scaleDelta) @@ -124,7 +110,8 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { frac, expInt := math.Frexp(v) // 11-bit exponential. exp := int32(expInt) // nolint: gosec - if p.scale <= 0 { + scale := p.scale.Load() + if scale <= 0 { // Because of the choice of fraction is always 1 power of two higher than we want. var correction int32 = 1 if frac == .5 { @@ -132,9 +119,9 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { // will be one higher than we want. correction = 2 } - return (exp - correction) >> (-p.scale) + return (exp - correction) >> (-scale) } - return exp<= b.startBin && int(bin) <= endBin { - b.counts[bin-b.startBin]++ + b.counts[bin-b.startBin].Add(1) return } // if the new bin is before the current start add spaces to the counts @@ -223,16 +211,22 @@ func (b *expoBuckets) record(bin int32) { shift := b.startBin - bin if newLength > cap(b.counts) { - b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) + b.counts = append(b.counts, make([]atomic.Uint64, newLength-len(b.counts))...) } - copy(b.counts[shift:origLen+int(shift)], b.counts) b.counts = b.counts[:newLength] + + // Shift existing elements to the right. Go's copy() doesn't work for + // structs like atomic.Uint64. 
+ for i := origLen - 1; i >= 0; i-- { + b.counts[i+int(shift)].Store(b.counts[i].Load()) + } + for i := 1; i < int(shift); i++ { - b.counts[i] = 0 + b.counts[i].Store(0) } b.startBin = bin - b.counts[0] = 1 + b.counts[0].Store(1) return } // if the new is after the end add spaces to the end @@ -240,15 +234,15 @@ func (b *expoBuckets) record(bin int32) { if int(bin-b.startBin) < cap(b.counts) { b.counts = b.counts[:bin-b.startBin+1] for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { - b.counts[i] = 0 + b.counts[i].Store(0) } - b.counts[bin-b.startBin] = 1 + b.counts[bin-b.startBin].Store(1) return } - end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) + end := make([]atomic.Uint64, int(bin-b.startBin)-len(b.counts)+1) b.counts = append(b.counts, end...) - b.counts[bin-b.startBin] = 1 + b.counts[bin-b.startBin].Store(1) } } @@ -275,10 +269,10 @@ func (b *expoBuckets) downscale(delta int32) { for i := 1; i < len(b.counts); i++ { idx := i + int(offset) if idx%int(steps) == 0 { - b.counts[idx/int(steps)] = b.counts[i] + b.counts[idx/int(steps)].Store(b.counts[i].Load()) continue } - b.counts[idx/int(steps)] += b.counts[i] + b.counts[idx/int(steps)].Add(b.counts[i].Load()) } lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) @@ -288,8 +282,8 @@ func (b *expoBuckets) downscale(delta int32) { func (b *expoBuckets) count() uint64 { var total uint64 - for _, count := range b.counts { - total += count + for i := range b.counts { + total += b.counts[i].Load() } return total } @@ -386,8 +380,8 @@ func (e *expoHistogram[N]) delta( hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count() - hDPts[i].Scale = val.scale - hDPts[i].ZeroCount = val.zeroCount + hDPts[i].Scale = val.scale.Load() + hDPts[i].ZeroCount = val.zeroCount.Load() hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin @@ -396,7 +390,9 @@ func (e *expoHistogram[N]) delta( len(val.posBuckets.counts), len(val.posBuckets.counts), ) - 
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + for j := range val.posBuckets.counts { + hDPts[i].PositiveBucket.Counts[j] = val.posBuckets.counts[j].Load() + } hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset( @@ -404,14 +400,18 @@ func (e *expoHistogram[N]) delta( len(val.negBuckets.counts), len(val.negBuckets.counts), ) - copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + for j := range val.negBuckets.counts { + hDPts[i].NegativeBucket.Counts[j] = val.negBuckets.counts[j].Load() + } if !e.noSum { - hDPts[i].Sum = val.sum + hDPts[i].Sum = val.sum.load() } if !e.noMinMax { - hDPts[i].Min = metricdata.NewExtrema(val.min) - hDPts[i].Max = metricdata.NewExtrema(val.max) + if val.minMax.set.Load() { + hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load()) + hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load()) + } } collectExemplars(&hDPts[i].Exemplars, val.res.Collect) @@ -443,14 +443,21 @@ func (e *expoHistogram[N]) cumulative( n := len(e.values) hDPts := reset(h.DataPoints, n, n) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int for _, val := range e.values { hDPts[i].Attributes = val.attrs - hDPts[i].StartTime = e.start + + startTime := e.start + if perSeriesStartTimeEnabled { + startTime = val.startTime + } + hDPts[i].StartTime = startTime hDPts[i].Time = t hDPts[i].Count = val.count() - hDPts[i].Scale = val.scale - hDPts[i].ZeroCount = val.zeroCount + hDPts[i].Scale = val.scale.Load() + hDPts[i].ZeroCount = val.zeroCount.Load() hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin @@ -459,7 +466,9 @@ func (e *expoHistogram[N]) cumulative( len(val.posBuckets.counts), len(val.posBuckets.counts), ) - copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + for j := range val.posBuckets.counts { + hDPts[i].PositiveBucket.Counts[j] = val.posBuckets.counts[j].Load() + } hDPts[i].NegativeBucket.Offset = 
val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset( @@ -467,14 +476,18 @@ func (e *expoHistogram[N]) cumulative( len(val.negBuckets.counts), len(val.negBuckets.counts), ) - copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + for j := range val.negBuckets.counts { + hDPts[i].NegativeBucket.Counts[j] = val.negBuckets.counts[j].Load() + } if !e.noSum { - hDPts[i].Sum = val.sum + hDPts[i].Sum = val.sum.load() } if !e.noMinMax { - hDPts[i].Min = metricdata.NewExtrema(val.min) - hDPts[i].Max = metricdata.NewExtrema(val.max) + if val.minMax.set.Load() { + hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load()) + hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load()) + } } collectExemplars(&hDPts[i].Exemplars, val.res.Collect) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 421325fb7..83582c670 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -11,6 +11,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -27,8 +28,9 @@ type hotColdHistogramPoint[N int64 | float64] struct { hcwg hotColdWaitGroup hotColdPoint [2]histogramPointCounters[N] - attrs attribute.Set - res FilteredExemplarReservoir[N] + attrs attribute.Set + res FilteredExemplarReservoir[N] + startTime time.Time } // histogramPointCounters contains only the atomic counter data, and is used by @@ -298,6 +300,7 @@ func (s *cumulativeHistogram[N]) measure( counts: make([]atomic.Uint64, len(s.bounds)+1), }, }, + startTime: now(), } return hPt }).(*hotColdHistogramPoint[N]) @@ -339,16 +342,23 @@ func (s *cumulativeHistogram[N]) collect( // current length for capacity. 
hDPts := reset(h.DataPoints, 0, s.values.Len()) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int s.values.Range(func(_, value any) bool { val := value.(*hotColdHistogramPoint[N]) + + startTime := s.start + if perSeriesStartTimeEnabled { + startTime = val.startTime + } // swap, observe, and clear the point readIdx := val.hcwg.swapHotAndWait() var bucketCounts []uint64 count := val.hotColdPoint[readIdx].loadCountsInto(&bucketCounts) newPt := metricdata.HistogramDataPoint[N]{ Attributes: val.attrs, - StartTime: s.start, + StartTime: startTime, Time: t, Count: count, Bounds: bounds, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 4924d732c..4c004bc99 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -8,14 +8,16 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // lastValuePoint is timestamped measurement data. type lastValuePoint[N int64 | float64] struct { - attrs attribute.Set - value atomicN[N] - res FilteredExemplarReservoir[N] + attrs attribute.Set + value atomicN[N] + res FilteredExemplarReservoir[N] + startTime time.Time } // lastValueMap summarizes a set of measurements as the last one made. @@ -31,10 +33,13 @@ func (s *lastValueMap[N]) measure( droppedAttr []attribute.KeyValue, ) { lv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { - return &lastValuePoint[N]{ - res: s.newRes(attr), - attrs: attr, + p := &lastValuePoint[N]{ + res: s.newRes(attr), + attrs: attr, + startTime: now(), } + p.value.Store(value) + return p }).(*lastValuePoint[N]) lv.value.Store(value) @@ -156,12 +161,19 @@ func (s *cumulativeLastValue[N]) collect( // current length for capacity. 
dPts := reset(gData.DataPoints, 0, s.values.Len()) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int s.values.Range(func(_, value any) bool { v := value.(*lastValuePoint[N]) + + startTime := s.start + if perSeriesStartTimeEnabled { + startTime = v.startTime + } newPt := metricdata.DataPoint[N]{ Attributes: v.attrs, - StartTime: s.start, + StartTime: startTime, Time: t, Value: v.value.Load(), } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 66cb68085..3fe7c7cf0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -8,13 +8,15 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type sumValue[N int64 | float64] struct { - n atomicCounter[N] - res FilteredExemplarReservoir[N] - attrs attribute.Set + n atomicCounter[N] + res FilteredExemplarReservoir[N] + attrs attribute.Set + startTime time.Time } type sumValueMap[N int64 | float64] struct { @@ -30,8 +32,9 @@ func (s *sumValueMap[N]) measure( ) { sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { return &sumValue[N]{ - res: s.newRes(attr), - attrs: attr, + res: s.newRes(attr), + attrs: attr, + startTime: now(), } }).(*sumValue[N]) sv.n.add(value) @@ -160,12 +163,19 @@ func (s *cumulativeSum[N]) collect( // current length for capacity. 
dPts := reset(sData.DataPoints, 0, s.values.Len()) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int s.values.Range(func(_, value any) bool { val := value.(*sumValue[N]) + + startTime := s.start + if perSeriesStartTimeEnabled { + startTime = val.startTime + } newPt := metricdata.DataPoint[N]{ Attributes: val.attrs, - StartTime: s.start, + StartTime: startTime, Time: t, Value: val.n.load(), } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go index 66788c9e9..2d2b987c5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go @@ -16,8 +16,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 5b0630207..0357afd45 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -32,8 +32,9 @@ type ManualReader struct { isShutdown bool externalProducers atomic.Value - temporalitySelector TemporalitySelector - aggregationSelector AggregationSelector + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + cardinalityLimitSelector CardinalityLimitSelector inst *observ.Instrumentation } @@ -45,8 +46,9 @@ var _ = map[Reader]struct{}{&ManualReader{}: {}} func NewManualReader(opts ...ManualReaderOption) *ManualReader { cfg := newManualReaderConfig(opts) r := &ManualReader{ - 
temporalitySelector: cfg.temporalitySelector, - aggregationSelector: cfg.aggregationSelector, + temporalitySelector: cfg.temporalitySelector, + aggregationSelector: cfg.aggregationSelector, + cardinalityLimitSelector: cfg.cardinalityLimitSelector, } r.externalProducers.Store(cfg.producers) @@ -89,6 +91,11 @@ func (mr *ManualReader) aggregation( return mr.aggregationSelector(kind) } +// cardinalityLimit returns the cardinality limit for kind. +func (mr *ManualReader) cardinalityLimit(kind InstrumentKind) (int, bool) { + return mr.cardinalityLimitSelector(kind) +} + // Shutdown closes any connections and frees any resources used by the reader. // // This method is safe to call concurrently. @@ -179,16 +186,18 @@ func (r *ManualReader) MarshalLog() any { // manualReaderConfig contains configuration options for a ManualReader. type manualReaderConfig struct { - temporalitySelector TemporalitySelector - aggregationSelector AggregationSelector - producers []Producer + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + cardinalityLimitSelector CardinalityLimitSelector + producers []Producer } // newManualReaderConfig returns a manualReaderConfig configured with options. 
func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig { cfg := manualReaderConfig{ - temporalitySelector: DefaultTemporalitySelector, - aggregationSelector: DefaultAggregationSelector, + temporalitySelector: DefaultTemporalitySelector, + aggregationSelector: DefaultAggregationSelector, + cardinalityLimitSelector: defaultCardinalityLimitSelector, } for _, opt := range opts { cfg = opt.applyManual(cfg) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index ef40ef29a..d1efc9f37 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -15,7 +15,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) // Default periodic reader timing. @@ -26,17 +26,19 @@ const ( // periodicReaderConfig contains configuration options for a PeriodicReader. type periodicReaderConfig struct { - interval time.Duration - timeout time.Duration - producers []Producer + interval time.Duration + timeout time.Duration + producers []Producer + cardinalityLimitSelector CardinalityLimitSelector } // newPeriodicReaderConfig returns a periodicReaderConfig configured with // options. 
func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { c := periodicReaderConfig{ - interval: envDuration(envInterval, defaultInterval), - timeout: envDuration(envTimeout, defaultTimeout), + interval: envDuration(envInterval, defaultInterval), + timeout: envDuration(envTimeout, defaultTimeout), + cardinalityLimitSelector: defaultCardinalityLimitSelector, } for _, o := range options { c = o.applyPeriodic(c) @@ -107,14 +109,17 @@ func WithInterval(d time.Duration) PeriodicReaderOption { // exporter. That is left to the user to accomplish. func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { conf := newPeriodicReaderConfig(options) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel( //nolint:gosec // cancel called during PeriodicReader shutdown. + context.Background(), + ) r := &PeriodicReader{ - interval: conf.interval, - timeout: conf.timeout, - exporter: exporter, - flushCh: make(chan chan error), - cancel: cancel, - done: make(chan struct{}), + interval: conf.interval, + timeout: conf.timeout, + exporter: exporter, + flushCh: make(chan chan error), + cancel: cancel, + done: make(chan struct{}), + cardinalityLimitSelector: conf.cardinalityLimitSelector, rmPool: sync.Pool{ New: func() any { return &metricdata.ResourceMetrics{} @@ -168,6 +173,8 @@ type PeriodicReader struct { rmPool sync.Pool + cardinalityLimitSelector CardinalityLimitSelector + inst *observ.Instrumentation } @@ -220,6 +227,11 @@ func (r *PeriodicReader) aggregation( return r.exporter.Aggregation(kind) } +// cardinalityLimit returns the cardinality limit for kind. +func (r *PeriodicReader) cardinalityLimit(kind InstrumentKind) (int, bool) { + return r.cardinalityLimitSelector(kind) +} + // collectAndExport gather all metric data related to the periodicReader r from // the SDK and exports it with r's exporter. 
func (r *PeriodicReader) collectAndExport(ctx context.Context) error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index ab269cdfd..34300a786 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -301,7 +301,7 @@ func (i *inserter[N]) addCallback(cback func(context.Context) error) { i.pipeline.callbacks = append(i.pipeline.callbacks, cback) } -var aggIDCount uint64 +var aggIDCount atomic.Uint64 // aggVal is the cached value in an aggregators cache. type aggVal[N int64 | float64] struct { @@ -395,9 +395,7 @@ func (i *inserter[N]) cachedAggregator( b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation // limits for the builder (an all the created aggregates). - // cardinalityLimit will be 0 by default if unset (or - // unrecognized input). Use that value directly. - b.AggregationLimit = i.pipeline.cardinalityLimit + b.AggregationLimit = i.getCardinalityLimit(kind) in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) if err != nil { return aggVal[N]{0, nil, err} @@ -413,12 +411,24 @@ func (i *inserter[N]) cachedAggregator( unit: stream.Unit, compAgg: out, }) - id := atomic.AddUint64(&aggIDCount, 1) + id := aggIDCount.Add(1) return aggVal[N]{id, in, err} }) return cv.Measure, cv.ID, cv.Err } +// getCardinalityLimit returns the cardinality limit for the given instrument kind. +// When the reader's selector returns fallback = true, the pipeline's global +// limit is used, then the default if global is unset. When fallback is false, +// the selector's limit is used (0 or less means unlimited). 
+func (i *inserter[N]) getCardinalityLimit(kind InstrumentKind) int { + limit, fallback := i.pipeline.reader.cardinalityLimit(kind) + if fallback { + return i.pipeline.cardinalityLimit + } + return limit +} + // logConflict validates if an instrument with the same case-insensitive name // as id has already been created. If that instrument conflicts with id, a // warning is logged. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index 7b205c736..99079dd27 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -59,6 +59,15 @@ type Reader interface { // Reader methods. aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type. + // cardinalityLimit returns the cardinality limit for an instrument kind. + // When fallback is true, the pipeline falls back to the provider's global limit. + // When fallback is false, limit is used: 0 or less means no limit (unlimited), + // and a positive value is the limit for that kind. + // + // This method needs to be concurrent safe with itself and all the other + // Reader methods. + cardinalityLimit(InstrumentKind) (limit int, fallback bool) + // Collect gathers and returns all metric data related to the Reader from // the SDK and stores it in rm. An error is returned if this is called // after Shutdown or if rm is nil. @@ -192,6 +201,25 @@ func DefaultAggregationSelector(ik InstrumentKind) Aggregation { panic("unknown instrument kind") } +// CardinalityLimitSelector selects the cardinality limit to use based on the +// InstrumentKind. The cardinality limit is the maximum number of distinct +// attribute sets that can be recorded for a single instrument. +// +// The selector returns (limit, fallback). When fallback is true, the pipeline +// falls back to the provider's global cardinality limit. 
+// When fallback is false, the limit is applied: a value of 0 or less means +// no limit, and a positive value is the limit for that kind. +// To avoid overriding the provider's global limit, return (0, true). +type CardinalityLimitSelector func(InstrumentKind) (limit int, fallback bool) + +// defaultCardinalityLimitSelector is the default CardinalityLimitSelector used +// if WithCardinalityLimitSelector is not provided. It returns (0, true) for all +// instrument kinds, allowing the pipeline to fall back to the provider's global +// limit. +func defaultCardinalityLimitSelector(_ InstrumentKind) (int, bool) { + return 0, true +} + // ReaderOption is an option which can be applied to manual or Periodic // readers. type ReaderOption interface { @@ -220,3 +248,33 @@ func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConf c.producers = append(c.producers, o.p) return c } + +// WithCardinalityLimitSelector sets the CardinalityLimitSelector a reader will +// use to determine the cardinality limit for an instrument based on its kind. +// If this option is not used, the reader will use the +// defaultCardinalityLimitSelector. +// +// The selector should return (limit, false) to set a positive limit, +// (0, false) to explicitly specify unlimited, or +// (0, true) to fall back to the provider's global limit. +// +// See [CardinalityLimitSelector] for more details. +func WithCardinalityLimitSelector(selector CardinalityLimitSelector) ReaderOption { + return cardinalityLimitSelectorOption{selector: selector} +} + +type cardinalityLimitSelectorOption struct { + selector CardinalityLimitSelector +} + +// applyManual returns a manualReaderConfig with option applied. +func (o cardinalityLimitSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.cardinalityLimitSelector = o.selector + return c +} + +// applyPeriodic returns a periodicReaderConfig with option applied. 
+func (o cardinalityLimitSelectorOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig { + c.cardinalityLimitSelector = o.selector + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index ea9e076c7..26752be7d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 8a7bb330b..04f15fcd2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index 0d6e213d9..a3d647d92 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -193,3 +193,11 @@ func WithContainer() Option { func WithContainerID() Option { return WithDetectors(cgroupContainerIDDetector{}) } + +// WithService adds all the Service attributes to the configured Resource. 
+func WithService() Option { + return WithDetectors( + defaultServiceInstanceIDDetector{}, + defaultServiceNameDetector{}, + ) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index a19b39def..e977ff1c4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index c49157224..bc0e5c19e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 023621ba7..755c08242 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type hostIDProvider func() (string, error) @@ -31,19 +31,19 @@ type hostIDReaderBSD struct { readFile fileReader } -// read attempts to read the machine-id from /etc/hostid. If not found it will -// execute `kenv -q smbios.system.uuid`. If neither location yields an id an -// error will be returned. +// read attempts to read the machine-id from /etc/hostid. +// If not found it will execute: /bin/kenv -q smbios.system.uuid. 
+// If neither location yields an id an error will be returned. func (r *hostIDReaderBSD) read() (string, error) { if result, err := r.readFile("/etc/hostid"); err == nil { return strings.TrimSpace(result), nil } - if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + if result, err := r.execCommand("/bin/kenv", "-q", "smbios.system.uuid"); err == nil { return strings.TrimSpace(result), nil } - return "", errors.New("host id not found in: /etc/hostid or kenv") + return "", errors.New("host id not found in: /etc/hostid or /bin/kenv") } // hostIDReaderDarwin implements hostIDReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go index 6354b3560..c95d87685 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -8,7 +8,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import "os" func readFile(filename string) (string, error) { - b, err := os.ReadFile(filename) + b, err := os.ReadFile(filename) // nolint:gosec // false positive if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 534809e21..f5682cad4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index a1189553c..99dce64f6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go 
@@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 28e1e4f7e..f715be53e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -232,6 +232,15 @@ func Empty() *Resource { // Default returns an instance of Resource with a default // "service.name" and OpenTelemetrySDK attributes. func Default() *Resource { + return DefaultWithContext(context.Background()) +} + +// DefaultWithContext returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +// +// If the default resource has already been initialized, the provided ctx +// is ignored and the cached resource is returned. +func DefaultWithContext(ctx context.Context) *Resource { defaultResourceOnce.Do(func() { var err error defaultDetectors := []Detector{ @@ -243,7 +252,7 @@ func Default() *Resource { defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) } defaultResource, err = Detect( - context.Background(), + ctx, defaultDetectors..., ) if err != nil { @@ -260,8 +269,14 @@ func Default() *Resource { // Environment returns an instance of Resource with attributes // extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. func Environment() *Resource { + return EnvironmentWithContext(context.Background()) +} + +// EnvironmentWithContext returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. 
+func EnvironmentWithContext(ctx context.Context) *Resource { detector := &fromEnv{} - resource, err := detector.Detect(context.Background()) + resource, err := detector.Detect(ctx) if err != nil { otel.Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 7d15cbb9c..32854b14a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -68,7 +68,7 @@ type batchSpanProcessor struct { o BatchSpanProcessorOptions queue chan ReadOnlySpan - dropped uint32 + dropped atomic.Uint32 inst *observ.BSP @@ -123,12 +123,10 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO otel.Handle(err) } - bsp.stopWait.Add(1) - go func() { - defer bsp.stopWait.Done() + bsp.stopWait.Go(func() { bsp.processQueue() bsp.drainQueue() - }() + }) return bsp } @@ -295,7 +293,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", bsp.dropped.Load()) if bsp.inst != nil { bsp.inst.Processed(ctx, int64(l)) } @@ -423,7 +421,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) case bsp.queue <- sd: return true default: - atomic.AddUint32(&bsp.dropped, 1) + bsp.dropped.Add(1) if bsp.inst != nil { bsp.inst.ProcessedQueueFull(ctx, 1) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go index d9cfba0b4..c31e03aa0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go +++ 
b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go index 8afd05267..0e77cd953 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) var measureAttrsPool = sync.Pool{ diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go index 13a2db296..560d316f2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -55,6 +55,10 @@ func NewTracer() (Tracer, error) { func (t Tracer) Enabled() bool { return t.enabled } func (t Tracer) SpanStarted(ctx context.Context, psc 
trace.SpanContext, span trace.Span) { + if !t.started.Enabled(ctx) { + return + } + key := spanStartedKey{ parent: parentStateNoParent, sampling: samplingStateDrop, @@ -89,6 +93,10 @@ func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { } func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + if !t.live.Enabled(ctx) { + return + } + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} opts := spanLiveOpts[key] t.live.Add(ctx, value, opts...) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index d2cf4ebd3..cd40d299d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" @@ -262,6 +263,7 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } + var err error for _, sps := range spss { select { case <-ctx.Done(): @@ -269,11 +271,9 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { default: } - if err := sps.sp.ForceFlush(ctx); err != nil { - return err - } + err = errors.Join(err, sps.sp.ForceFlush(ctx)) } - return nil + return err } // Shutdown shuts down TracerProvider. 
All registered span processors are shut down @@ -303,14 +303,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { sps.state.Do(func() { err = sps.sp.Shutdown(ctx) }) - if err != nil { - if retErr == nil { - retErr = err - } else { - // Poor man's list of errors - retErr = fmt.Errorf("%w; %w", retErr, err) - } - } + retErr = errors.Join(retErr, err) } p.spanProcessors.Store(&spanProcessorStates{}) return retErr diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index 81c5060ad..845e292c2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -69,17 +69,17 @@ type traceIDRatioSampler struct { } func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) + state := trace.SpanContextFromContext(p.ParentContext).TraceState() x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, - Tracestate: psc.TraceState(), + Tracestate: state, } } return SamplingResult{ Decision: Drop, - Tracestate: psc.TraceState(), + Tracestate: state, } } @@ -94,12 +94,20 @@ func (ts traceIDRatioSampler) Description() string { // //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { + // Cannot use AlwaysSample() and NeverSample(), must return spec-compliant descriptions. + // See https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased. 
if fraction >= 1 { - return AlwaysSample() + return predeterminedSampler{ + description: "TraceIDRatioBased{1}", + decision: RecordAndSample, + } } if fraction <= 0 { - fraction = 0 + return predeterminedSampler{ + description: "TraceIDRatioBased{0}", + decision: Drop, + } } return &traceIDRatioSampler{ @@ -118,6 +126,7 @@ func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOnSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwayson return "AlwaysOnSampler" } @@ -139,6 +148,7 @@ func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOffSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwaysoff return "AlwaysOffSampler" } @@ -147,6 +157,22 @@ func NeverSample() Sampler { return alwaysOffSampler{} } +type predeterminedSampler struct { + description string + decision SamplingDecision +} + +func (s predeterminedSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: s.decision, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (s predeterminedSampler) Description() string { + return s.description +} + // ParentBased returns a sampler decorator which behaves differently, // based on the parent of the span. If the span has no parent, // the decorated sampler is used to make sampling decision. 
If the span has diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index d46661059..7d55ce1dc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index b5497c281..766731dd2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md deleted file mode 100644 index fed7013e6..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md +++ /dev/null @@ -1,78 +0,0 @@ - -# Migration from v1.38.0 to v1.39.0 - -The `go.opentelemetry.io/otel/semconv/v1.39.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.38.0` with the following exceptions. - -## Removed - -The following declarations have been removed. -Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. - -If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. -If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. 
- -- `LinuxMemorySlabStateKey` -- `LinuxMemorySlabStateReclaimable` -- `LinuxMemorySlabStateUnreclaimable` -- `PeerService` -- `PeerServiceKey` -- `RPCConnectRPCErrorCodeAborted` -- `RPCConnectRPCErrorCodeAlreadyExists` -- `RPCConnectRPCErrorCodeCancelled` -- `RPCConnectRPCErrorCodeDataLoss` -- `RPCConnectRPCErrorCodeDeadlineExceeded` -- `RPCConnectRPCErrorCodeFailedPrecondition` -- `RPCConnectRPCErrorCodeInternal` -- `RPCConnectRPCErrorCodeInvalidArgument` -- `RPCConnectRPCErrorCodeKey` -- `RPCConnectRPCErrorCodeNotFound` -- `RPCConnectRPCErrorCodeOutOfRange` -- `RPCConnectRPCErrorCodePermissionDenied` -- `RPCConnectRPCErrorCodeResourceExhausted` -- `RPCConnectRPCErrorCodeUnauthenticated` -- `RPCConnectRPCErrorCodeUnavailable` -- `RPCConnectRPCErrorCodeUnimplemented` -- `RPCConnectRPCErrorCodeUnknown` -- `RPCConnectRPCRequestMetadata` -- `RPCConnectRPCResponseMetadata` -- `RPCGRPCRequestMetadata` -- `RPCGRPCResponseMetadata` -- `RPCGRPCStatusCodeAborted` -- `RPCGRPCStatusCodeAlreadyExists` -- `RPCGRPCStatusCodeCancelled` -- `RPCGRPCStatusCodeDataLoss` -- `RPCGRPCStatusCodeDeadlineExceeded` -- `RPCGRPCStatusCodeFailedPrecondition` -- `RPCGRPCStatusCodeInternal` -- `RPCGRPCStatusCodeInvalidArgument` -- `RPCGRPCStatusCodeKey` -- `RPCGRPCStatusCodeNotFound` -- `RPCGRPCStatusCodeOk` -- `RPCGRPCStatusCodeOutOfRange` -- `RPCGRPCStatusCodePermissionDenied` -- `RPCGRPCStatusCodeResourceExhausted` -- `RPCGRPCStatusCodeUnauthenticated` -- `RPCGRPCStatusCodeUnavailable` -- `RPCGRPCStatusCodeUnimplemented` -- `RPCGRPCStatusCodeUnknown` -- `RPCJSONRPCErrorCode` -- `RPCJSONRPCErrorCodeKey` -- `RPCJSONRPCErrorMessage` -- `RPCJSONRPCErrorMessageKey` -- `RPCJSONRPCRequestID` -- `RPCJSONRPCRequestIDKey` -- `RPCJSONRPCVersion` -- `RPCJSONRPCVersionKey` -- `RPCService` -- `RPCServiceKey` -- `RPCSystemApacheDubbo` -- `RPCSystemConnectRPC` -- `RPCSystemDotnetWcf` -- `RPCSystemGRPC` -- `RPCSystemJSONRPC` -- `RPCSystemJavaRmi` -- `RPCSystemKey` -- `RPCSystemOncRPC` - -[OpenTelemetry 
Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions -[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md deleted file mode 100644 index 4b0e6f7f3..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.39.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.39.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.39.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md new file mode 100644 index 000000000..e246b1692 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md @@ -0,0 +1,27 @@ + +# Migration from v1.39.0 to v1.40.0 + +The `go.opentelemetry.io/otel/semconv/v1.40.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.39.0` with the following exceptions. + +## Removed + +The following declarations have been removed. +Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. 
+ +- `ErrorMessage` +- `ErrorMessageKey` +- `RPCMessageCompressedSize` +- `RPCMessageCompressedSizeKey` +- `RPCMessageID` +- `RPCMessageIDKey` +- `RPCMessageTypeKey` +- `RPCMessageTypeReceived` +- `RPCMessageTypeSent` +- `RPCMessageUncompressedSize` +- `RPCMessageUncompressedSizeKey` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md new file mode 100644 index 000000000..c51b7fb7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.40.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.40.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.40.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go similarity index 95% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go index 080365fc1..ee6b1f79d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go @@ -3,7 +3,7 @@ // Code generated from semantic convention specification. DO NOT EDIT. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" import "go.opentelemetry.io/otel/attribute" @@ -3431,7 +3431,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "a3bf90e006b2" // @@ -3467,7 +3467,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "gcr.io/opentelemetry/operator" ContainerImageNameKey = attribute.Key("container.image.name") @@ -3478,7 +3478,7 @@ const ( // // Type: string[] // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", @@ -3497,7 +3497,7 @@ const ( // // Type: string[] // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "v1.27.1", "3.5.7-0" // @@ -3856,6 +3856,12 @@ const ( // [Generating query summary] // section. // + // For batch operations, if the individual operations are known to have the same + // query summary + // then that query summary SHOULD be used prepended by `BATCH `, + // otherwise `db.query.summary` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + // // [Generating query summary]: /docs/db/database-spans.md#generating-a-summary-of-the-query DBQuerySummaryKey = attribute.Key("db.query.summary") @@ -4623,27 +4629,6 @@ func EnduserPseudoID(val string) attribute.KeyValue { // Namespace: error const ( - // ErrorMessageKey is the attribute Key conforming to the "error.message" - // semantic conventions. It represents a message providing more detail about an - // error in human-readable form. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Unexpected input type: string", "The user has exceeded their - // storage quota" - // Note: `error.message` should provide additional context and detail about an - // error. - // It is NOT RECOMMENDED to duplicate the value of `error.type` in - // `error.message`. - // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in - // `error.message`. - // - // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded - // cardinality and overlap with span status. - ErrorMessageKey = attribute.Key("error.message") - // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic // conventions. It represents the describes a class of error the operation ended // with. @@ -4683,13 +4668,6 @@ const ( ErrorTypeKey = attribute.Key("error.type") ) -// ErrorMessage returns an attribute KeyValue conforming to the "error.message" -// semantic conventions. It represents a message providing more detail about an -// error in human-readable form. -func ErrorMessage(val string) attribute.KeyValue { - return ErrorMessageKey.String(val) -} - // Enum values for error.type var ( // A fallback error value to be used when the instrumentation doesn't define a @@ -4710,6 +4688,9 @@ const ( // Stability: Stable // // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + // Note: > [!WARNING] + // + // > This attribute may contain sensitive information. ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the @@ -5165,6 +5146,19 @@ const ( // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + // FeatureFlagErrorMessageKey is the attribute Key conforming to the + // "feature_flag.error.message" semantic conventions. 
It represents a message + // providing more detail about an error that occurred during feature flag + // evaluation in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + FeatureFlagErrorMessageKey = attribute.Key("feature_flag.error.message") + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" // semantic conventions. It represents the lookup key of the feature flag. // @@ -5266,6 +5260,14 @@ func FeatureFlagContextID(val string) attribute.KeyValue { return FeatureFlagContextIDKey.String(val) } +// FeatureFlagErrorMessage returns an attribute KeyValue conforming to the +// "feature_flag.error.message" semantic conventions. It represents a message +// providing more detail about an error that occurred during feature flag +// evaluation in human-readable form. +func FeatureFlagErrorMessage(val string) attribute.KeyValue { + return FeatureFlagErrorMessageKey.String(val) +} + // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. It represents the lookup key of the // feature flag. @@ -5980,6 +5982,41 @@ const ( // // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") + + // GCPGCEInstanceGroupManagerNameKey is the attribute Key conforming to the + // "gcp.gce.instance_group_manager.name" semantic conventions. It represents the + // name of the Instance Group Manager (IGM) that manages this VM, if any. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web-igm", "my-managed-group" + GCPGCEInstanceGroupManagerNameKey = attribute.Key("gcp.gce.instance_group_manager.name") + + // GCPGCEInstanceGroupManagerRegionKey is the attribute Key conforming to the + // "gcp.gce.instance_group_manager.region" semantic conventions. It represents + // the region of a **regional** Instance Group Manager (e.g., `us-central1`). + // Set this **only** when the IGM is regional. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "europe-west1" + GCPGCEInstanceGroupManagerRegionKey = attribute.Key("gcp.gce.instance_group_manager.region") + + // GCPGCEInstanceGroupManagerZoneKey is the attribute Key conforming to the + // "gcp.gce.instance_group_manager.zone" semantic conventions. It represents the + // zone of a **zonal** Instance Group Manager (e.g., `us-central1-a`). Set this + // **only** when the IGM is zonal. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1-a", "europe-west1-b" + GCPGCEInstanceGroupManagerZoneKey = attribute.Key("gcp.gce.instance_group_manager.zone") ) // GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the @@ -6103,6 +6140,29 @@ func GCPGCEInstanceName(val string) attribute.KeyValue { return GCPGCEInstanceNameKey.String(val) } +// GCPGCEInstanceGroupManagerName returns an attribute KeyValue conforming to the +// "gcp.gce.instance_group_manager.name" semantic conventions. It represents the +// name of the Instance Group Manager (IGM) that manages this VM, if any. +func GCPGCEInstanceGroupManagerName(val string) attribute.KeyValue { + return GCPGCEInstanceGroupManagerNameKey.String(val) +} + +// GCPGCEInstanceGroupManagerRegion returns an attribute KeyValue conforming to +// the "gcp.gce.instance_group_manager.region" semantic conventions. 
It +// represents the region of a **regional** Instance Group Manager (e.g., +// `us-central1`). Set this **only** when the IGM is regional. +func GCPGCEInstanceGroupManagerRegion(val string) attribute.KeyValue { + return GCPGCEInstanceGroupManagerRegionKey.String(val) +} + +// GCPGCEInstanceGroupManagerZone returns an attribute KeyValue conforming to the +// "gcp.gce.instance_group_manager.zone" semantic conventions. It represents the +// zone of a **zonal** Instance Group Manager (e.g., `us-central1-a`). Set this +// **only** when the IGM is zonal. +func GCPGCEInstanceGroupManagerZone(val string) attribute.KeyValue { + return GCPGCEInstanceGroupManagerZoneKey.String(val) +} + // Enum values for gcp.apphub.service.criticality_type var ( // Mission critical service. @@ -6265,6 +6325,17 @@ const ( // Examples: "Math Tutor", "Fiction Writer" GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + // GenAIAgentVersionKey is the attribute Key conforming to the + // "gen_ai.agent.version" semantic conventions. It represents the version of the + // GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0", "2025-05-01" + GenAIAgentVersionKey = attribute.Key("gen_ai.agent.version") + // GenAIConversationIDKey is the attribute Key conforming to the // "gen_ai.conversation.id" semantic conventions. It represents the unique // identifier for a conversation (session, thread), used to store and correlate @@ -6663,6 +6734,44 @@ const ( // Examples: "gpt-4-0613" GenAIResponseModelKey = attribute.Key("gen_ai.response.model") + // GenAIRetrievalDocumentsKey is the attribute Key conforming to the + // "gen_ai.retrieval.documents" semantic conventions. It represents the + // documents retrieved. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "id": "doc_123",\n "score": 0.95\n },\n {\n "id": + // "doc_456",\n "score": 0.87\n },\n {\n "id": "doc_789",\n "score": 0.82\n + // }\n]\n" + // Note: Instrumentations MUST follow [Retrieval documents JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Each document object SHOULD contain at least the following properties: + // `id` (string): A unique identifier for the document, `score` (double): The + // relevance score of the document + // + // [Retrieval documents JSON schema]: /docs/gen-ai/gen-ai-retrieval-documents.json + GenAIRetrievalDocumentsKey = attribute.Key("gen_ai.retrieval.documents") + + // GenAIRetrievalQueryTextKey is the attribute Key conforming to the + // "gen_ai.retrieval.query.text" semantic conventions. It represents the query + // text used for retrieval. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "What is the capital of France?", "weather in Paris" + // Note: > [!Warning] + // + // > This attribute may contain sensitive information. + GenAIRetrievalQueryTextKey = attribute.Key("gen_ai.retrieval.query.text") + // GenAISystemInstructionsKey is the attribute Key conforming to the // "gen_ai.system_instructions" semantic conventions. It represents the system // message or instructions provided to the GenAI model separately from the chat @@ -6837,6 +6946,30 @@ const ( // updates. GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + // GenAIUsageCacheCreationInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.cache_creation.input_tokens" semantic conventions. 
It + // represents the number of input tokens written to a provider-managed cache. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 25 + // Note: The value SHOULD be included in `gen_ai.usage.input_tokens`. + GenAIUsageCacheCreationInputTokensKey = attribute.Key("gen_ai.usage.cache_creation.input_tokens") + + // GenAIUsageCacheReadInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.cache_read.input_tokens" semantic conventions. It represents + // the number of input tokens served from a provider-managed cache. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + // Note: The value SHOULD be included in `gen_ai.usage.input_tokens`. + GenAIUsageCacheReadInputTokensKey = attribute.Key("gen_ai.usage.cache_read.input_tokens") + // GenAIUsageInputTokensKey is the attribute Key conforming to the // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of // tokens used in the GenAI input (prompt). @@ -6846,6 +6979,12 @@ const ( // Stability: Development // // Examples: 100 + // Note: This value SHOULD include all types of input tokens, including cached + // tokens. + // Instrumentations SHOULD make a best effort to populate this value, using a + // total + // provided by the provider when available or, depending on the provider API, + // by summing different token types parsed from the provider output. GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") // GenAIUsageOutputTokensKey is the attribute Key conforming to the @@ -6880,6 +7019,13 @@ func GenAIAgentName(val string) attribute.KeyValue { return GenAIAgentNameKey.String(val) } +// GenAIAgentVersion returns an attribute KeyValue conforming to the +// "gen_ai.agent.version" semantic conventions. It represents the version of the +// GenAI agent. 
+func GenAIAgentVersion(val string) attribute.KeyValue { + return GenAIAgentVersionKey.String(val) +} + // GenAIConversationID returns an attribute KeyValue conforming to the // "gen_ai.conversation.id" semantic conventions. It represents the unique // identifier for a conversation (session, thread), used to store and correlate @@ -7036,6 +7182,13 @@ func GenAIResponseModel(val string) attribute.KeyValue { return GenAIResponseModelKey.String(val) } +// GenAIRetrievalQueryText returns an attribute KeyValue conforming to the +// "gen_ai.retrieval.query.text" semantic conventions. It represents the query +// text used for retrieval. +func GenAIRetrievalQueryText(val string) attribute.KeyValue { + return GenAIRetrievalQueryTextKey.String(val) +} + // GenAIToolCallID returns an attribute KeyValue conforming to the // "gen_ai.tool.call.id" semantic conventions. It represents the tool call // identifier. @@ -7064,6 +7217,20 @@ func GenAIToolType(val string) attribute.KeyValue { return GenAIToolTypeKey.String(val) } +// GenAIUsageCacheCreationInputTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.cache_creation.input_tokens" semantic conventions. It +// represents the number of input tokens written to a provider-managed cache. +func GenAIUsageCacheCreationInputTokens(val int) attribute.KeyValue { + return GenAIUsageCacheCreationInputTokensKey.Int(val) +} + +// GenAIUsageCacheReadInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.cache_read.input_tokens" semantic conventions. It represents the +// number of input tokens served from a provider-managed cache. +func GenAIUsageCacheReadInputTokens(val int) attribute.KeyValue { + return GenAIUsageCacheReadInputTokensKey.Int(val) +} + // GenAIUsageInputTokens returns an attribute KeyValue conforming to the // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of // tokens used in the GenAI input (prompt). 
@@ -7100,6 +7267,11 @@ var ( // // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Retrieval operation such as [OpenAI Search Vector Store API] + // Stability: development + // + // [OpenAI Search Vector Store API]: https://platform.openai.com/docs/api-reference/vector-stores/search + GenAIOperationNameRetrieval = GenAIOperationNameKey.String("retrieval") // Create GenAI agent // Stability: development GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") @@ -7889,9 +8061,18 @@ const ( // the list of known HTTP methods. If this override is done via environment // variable, then the environment variable MUST be named // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of - // case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is not a - // list of known methods in addition to the defaults). + // case-sensitive known HTTP methods. + // + // + // If this override is done via declarative configuration, then the list MUST be + // configurable via the `known_methods` property + // (an array of case-sensitive strings with minimum items 0) under + // `.instrumentation/development.general.http.client` and/or + // `.instrumentation/development.general.http.server`. + // + // In either case, this list MUST be a full override of the default known + // methods, + // it is not a list of known methods in addition to the defaults. // // HTTP method names are case-sensitive and `http.request.method` attribute // value MUST match a known HTTP method name exactly. 
@@ -8845,7 +9026,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry-cluster" K8SClusterNameKey = attribute.Key("k8s.cluster.name") @@ -8856,7 +9037,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever @@ -8892,7 +9073,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "redis" K8SContainerNameKey = attribute.Key("k8s.container.name") @@ -8904,7 +9085,7 @@ const ( // // Type: int // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") @@ -8955,7 +9136,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") @@ -8965,7 +9146,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") @@ -8976,7 +9157,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") @@ -8986,7 +9167,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") @@ -8997,7 +9178,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SDeploymentNameKey = 
attribute.Key("k8s.deployment.name") @@ -9008,7 +9189,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") @@ -9098,7 +9279,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SJobNameKey = attribute.Key("k8s.job.name") @@ -9108,7 +9289,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SJobUIDKey = attribute.Key("k8s.job.uid") @@ -9119,7 +9300,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "default" K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") @@ -9184,7 +9365,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "node-1" K8SNodeNameKey = attribute.Key("k8s.node.name") @@ -9194,7 +9375,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" K8SNodeUIDKey = attribute.Key("k8s.node.uid") @@ -9204,7 +9385,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "collector-gateway" // Note: The K8s Pod spec has an optional hostname field, which can be used to @@ -9224,7 +9405,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "172.18.0.2" // Note: This attribute aligns with the `podIP` field of the @@ -9238,7 +9419,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry-pod-autoconf" K8SPodNameKey = 
attribute.Key("k8s.pod.name") @@ -9249,7 +9430,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "2025-12-04T08:41:03Z" // Note: Date and time at which the object was acknowledged by the Kubelet. @@ -9293,7 +9474,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SPodUIDKey = attribute.Key("k8s.pod.uid") @@ -9304,7 +9485,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") @@ -9315,7 +9496,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") @@ -9383,13 +9564,152 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + // K8SServiceEndpointAddressTypeKey is the attribute Key conforming to the + // "k8s.service.endpoint.address_type" semantic conventions. It represents the + // address type of the service endpoint. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "IPv4", "IPv6" + // Note: The network address family or type of the endpoint. + // This attribute aligns with the `addressType` field of the + // [K8s EndpointSlice]. + // It is used to differentiate metrics when a Service is backed by multiple + // address types + // (e.g., in dual-stack clusters). 
+ // + // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ + K8SServiceEndpointAddressTypeKey = attribute.Key("k8s.service.endpoint.address_type") + + // K8SServiceEndpointConditionKey is the attribute Key conforming to the + // "k8s.service.endpoint.condition" semantic conventions. It represents the + // condition of the service endpoint. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ready", "serving", "terminating" + // Note: The current operational condition of the service endpoint. + // An endpoint can have multiple conditions set at once (e.g., both `serving` + // and `terminating` during rollout). + // This attribute aligns with the condition fields in the [K8s EndpointSlice]. + // + // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ + K8SServiceEndpointConditionKey = attribute.Key("k8s.service.endpoint.condition") + + // K8SServiceEndpointZoneKey is the attribute Key conforming to the + // "k8s.service.endpoint.zone" semantic conventions. It represents the zone of + // the service endpoint. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1a", "us-west-2b", "zone-a", "" + // Note: The zone where the endpoint is located, typically corresponding to a + // failure domain. + // This attribute aligns with the `zone` field of endpoints in the + // [K8s EndpointSlice]. + // It enables zone-aware monitoring of service endpoint distribution and + // supports + // features like [Topology Aware Routing]. + // + // If the zone is not populated (e.g., nodes without the + // `topology.kubernetes.io/zone` label), + // the attribute value will be an empty string. 
+ // + // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ + // [Topology Aware Routing]: https://kubernetes.io/docs/concepts/services-networking/topology-aware-routing/ + K8SServiceEndpointZoneKey = attribute.Key("k8s.service.endpoint.zone") + + // K8SServiceNameKey is the attribute Key conforming to the "k8s.service.name" + // semantic conventions. It represents the name of the Service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + K8SServiceNameKey = attribute.Key("k8s.service.name") + + // K8SServicePublishNotReadyAddressesKey is the attribute Key conforming to the + // "k8s.service.publish_not_ready_addresses" semantic conventions. It represents + // the whether the Service publishes not-ready endpoints. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true, false + // Note: Whether the Service is configured to publish endpoints before the pods + // are ready. + // This attribute is typically used to indicate that a Service (such as a + // headless + // Service for a StatefulSet) allows peer discovery before pods pass their + // readiness probes. + // It aligns with the `publishNotReadyAddresses` field of the + // [K8s ServiceSpec]. + // + // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + K8SServicePublishNotReadyAddressesKey = attribute.Key("k8s.service.publish_not_ready_addresses") + + // K8SServiceTrafficDistributionKey is the attribute Key conforming to the + // "k8s.service.traffic_distribution" semantic conventions. It represents the + // traffic distribution policy for the Service. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PreferSameZone", "PreferSameNode" + // Note: Specifies how traffic is distributed to endpoints for this Service. + // This attribute aligns with the `trafficDistribution` field of the + // [K8s ServiceSpec]. + // Known values include `PreferSameZone` (prefer endpoints in the same zone as + // the client) and + // `PreferSameNode` (prefer endpoints on the same node, fallback to same zone, + // then cluster-wide). + // If this field is not set on the Service, the attribute SHOULD NOT be emitted. + // When not set, Kubernetes distributes traffic evenly across all endpoints + // cluster-wide. + // + // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-distribution + K8SServiceTrafficDistributionKey = attribute.Key("k8s.service.traffic_distribution") + + // K8SServiceTypeKey is the attribute Key conforming to the "k8s.service.type" + // semantic conventions. It represents the type of the Kubernetes Service. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ClusterIP", "NodePort", "LoadBalancer" + // Note: This attribute aligns with the `type` field of the + // [K8s ServiceSpec]. + // + // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + K8SServiceTypeKey = attribute.Key("k8s.service.type") + + // K8SServiceUIDKey is the attribute Key conforming to the "k8s.service.uid" + // semantic conventions. It represents the UID of the Service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SServiceUIDKey = attribute.Key("k8s.service.uid") + // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. 
// // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") @@ -9400,7 +9720,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") @@ -9803,6 +10123,64 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue { return K8SResourceQuotaUIDKey.String(val) } +// K8SServiceAnnotation returns an attribute KeyValue conforming to the +// "k8s.service.annotation" semantic conventions. It represents the annotation +// placed on the Service, the `` being the annotation name, the value being +// the annotation value, even if the value is empty. +func K8SServiceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.service.annotation."+key, val) +} + +// K8SServiceEndpointZone returns an attribute KeyValue conforming to the +// "k8s.service.endpoint.zone" semantic conventions. It represents the zone of +// the service endpoint. +func K8SServiceEndpointZone(val string) attribute.KeyValue { + return K8SServiceEndpointZoneKey.String(val) +} + +// K8SServiceLabel returns an attribute KeyValue conforming to the +// "k8s.service.label" semantic conventions. It represents the label placed on +// the Service, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SServiceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.service.label."+key, val) +} + +// K8SServiceName returns an attribute KeyValue conforming to the +// "k8s.service.name" semantic conventions. It represents the name of the +// Service. 
+func K8SServiceName(val string) attribute.KeyValue { + return K8SServiceNameKey.String(val) +} + +// K8SServicePublishNotReadyAddresses returns an attribute KeyValue conforming to +// the "k8s.service.publish_not_ready_addresses" semantic conventions. It +// represents the whether the Service publishes not-ready endpoints. +func K8SServicePublishNotReadyAddresses(val bool) attribute.KeyValue { + return K8SServicePublishNotReadyAddressesKey.Bool(val) +} + +// K8SServiceSelector returns an attribute KeyValue conforming to the +// "k8s.service.selector" semantic conventions. It represents the selector +// key-value pair placed on the Service, the `` being the selector key, the +// value being the selector value. +func K8SServiceSelector(key string, val string) attribute.KeyValue { + return attribute.String("k8s.service.selector."+key, val) +} + +// K8SServiceTrafficDistribution returns an attribute KeyValue conforming to the +// "k8s.service.traffic_distribution" semantic conventions. It represents the +// traffic distribution policy for the Service. +func K8SServiceTrafficDistribution(val string) attribute.KeyValue { + return K8SServiceTrafficDistributionKey.String(val) +} + +// K8SServiceUID returns an attribute KeyValue conforming to the +// "k8s.service.uid" semantic conventions. It represents the UID of the Service. +func K8SServiceUID(val string) attribute.KeyValue { + return K8SServiceUIDKey.String(val) +} + // K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the // "k8s.statefulset.annotation" semantic conventions. 
It represents the // annotation placed on the StatefulSet, the `` being the annotation name, @@ -9995,6 +10373,48 @@ var ( K8SPodStatusReasonUnexpectedAdmissionError = K8SPodStatusReasonKey.String("UnexpectedAdmissionError") ) +// Enum values for k8s.service.endpoint.address_type +var ( + // IPv4 address type + // Stability: development + K8SServiceEndpointAddressTypeIPv4 = K8SServiceEndpointAddressTypeKey.String("IPv4") + // IPv6 address type + // Stability: development + K8SServiceEndpointAddressTypeIPv6 = K8SServiceEndpointAddressTypeKey.String("IPv6") + // FQDN address type + // Stability: development + K8SServiceEndpointAddressTypeFqdn = K8SServiceEndpointAddressTypeKey.String("FQDN") +) + +// Enum values for k8s.service.endpoint.condition +var ( + // The endpoint is ready to receive new connections. + // Stability: development + K8SServiceEndpointConditionReady = K8SServiceEndpointConditionKey.String("ready") + // The endpoint is currently handling traffic. + // Stability: development + K8SServiceEndpointConditionServing = K8SServiceEndpointConditionKey.String("serving") + // The endpoint is in the process of shutting down. 
+ // Stability: development + K8SServiceEndpointConditionTerminating = K8SServiceEndpointConditionKey.String("terminating") +) + +// Enum values for k8s.service.type +var ( + // ClusterIP service type + // Stability: development + K8SServiceTypeClusterIP = K8SServiceTypeKey.String("ClusterIP") + // NodePort service type + // Stability: development + K8SServiceTypeNodePort = K8SServiceTypeKey.String("NodePort") + // LoadBalancer service type + // Stability: development + K8SServiceTypeLoadBalancer = K8SServiceTypeKey.String("LoadBalancer") + // ExternalName service type + // Stability: development + K8SServiceTypeExternalName = K8SServiceTypeKey.String("ExternalName") +) + // Enum values for k8s.volume.type var ( // A [persistentVolumeClaim] volume @@ -11770,6 +12190,16 @@ func OncRPCVersion(val int) attribute.KeyValue { // Namespace: openai const ( + // OpenAIAPITypeKey is the attribute Key conforming to the "openai.api.type" + // semantic conventions. It represents the type of OpenAI API being used. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OpenAIAPITypeKey = attribute.Key("openai.api.type") + // OpenAIRequestServiceTierKey is the attribute Key conforming to the // "openai.request.service_tier" semantic conventions. It represents the service // tier requested. May be a specific tier, default, or auto. @@ -11818,6 +12248,20 @@ func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { return OpenAIResponseSystemFingerprintKey.String(val) } +// Enum values for openai.api.type +var ( + // The OpenAI [Chat Completions API]. + // Stability: development + // + // [Chat Completions API]: https://developers.openai.com/api/reference/chat-completions/overview + OpenAIAPITypeChatCompletions = OpenAIAPITypeKey.String("chat_completions") + // The OpenAI [Responses API]. 
+ // Stability: development + // + // [Responses API]: https://developers.openai.com/api/reference/responses/overview + OpenAIAPITypeResponses = OpenAIAPITypeKey.String("responses") +) + // Enum values for openai.request.service_tier var ( // The system will utilize scale tier credits until they are exhausted. @@ -11892,6 +12336,158 @@ var ( OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") ) +// Namespace: oracle +const ( + // OracleDBDomainKey is the attribute Key conforming to the "oracle.db.domain" + // semantic conventions. It represents the database domain associated with the + // connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "corp.internal", "prod.db.local" + // Note: This attribute SHOULD be set to the value of the `DB_DOMAIN` + // initialization parameter, + // as exposed in `v$parameter`. `DB_DOMAIN` defines the domain portion of the + // global + // database name and SHOULD be configured when a database is, or may become, + // part of a + // distributed environment. Its value consists of one or more valid identifiers + // (alphanumeric ASCII characters) separated by periods. + OracleDBDomainKey = attribute.Key("oracle.db.domain") + + // OracleDBInstanceNameKey is the attribute Key conforming to the + // "oracle.db.instance.name" semantic conventions. It represents the instance + // name associated with the connection in an Oracle Real Application Clusters + // environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ORCL1", "ORCL2", "ORCL3" + // Note: There can be multiple instances associated with a single database + // service. It indicates the + // unique instance name to which the connection is currently bound. For non-RAC + // databases, this value + // defaults to the `oracle.db.name`. 
+ OracleDBInstanceNameKey = attribute.Key("oracle.db.instance.name") + + // OracleDBNameKey is the attribute Key conforming to the "oracle.db.name" + // semantic conventions. It represents the database name associated with the + // connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ORCL1", "FREE" + // Note: This attribute SHOULD be set to the value of the parameter `DB_NAME` + // exposed in `v$parameter`. + OracleDBNameKey = attribute.Key("oracle.db.name") + + // OracleDBPdbKey is the attribute Key conforming to the "oracle.db.pdb" + // semantic conventions. It represents the pluggable database (PDB) name + // associated with the connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PDB1", "FREEPDB" + // Note: This attribute SHOULD reflect the PDB that the session is currently + // connected to. + // If instrumentation cannot reliably obtain the active PDB name for each + // operation + // without issuing an additional query (such as `SELECT SYS_CONTEXT`), it is + // RECOMMENDED to fall back to the PDB name specified at connection + // establishment. + OracleDBPdbKey = attribute.Key("oracle.db.pdb") + + // OracleDBServiceKey is the attribute Key conforming to the "oracle.db.service" + // semantic conventions. It represents the service name currently associated + // with the database connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "order-processing-service", "db_low.adb.oraclecloud.com", + // "db_high.adb.oraclecloud.com" + // Note: The effective service name for a connection can change during its + // lifetime, + // for example after executing sql, `ALTER SESSION`. 
If an instrumentation + // cannot reliably + // obtain the current service name for each operation without issuing an + // additional + // query (such as `SELECT SYS_CONTEXT`), it is RECOMMENDED to fall back to the + // service name originally provided at connection establishment. + OracleDBServiceKey = attribute.Key("oracle.db.service") +) + +// OracleDBDomain returns an attribute KeyValue conforming to the +// "oracle.db.domain" semantic conventions. It represents the database domain +// associated with the connection. +func OracleDBDomain(val string) attribute.KeyValue { + return OracleDBDomainKey.String(val) +} + +// OracleDBInstanceName returns an attribute KeyValue conforming to the +// "oracle.db.instance.name" semantic conventions. It represents the instance +// name associated with the connection in an Oracle Real Application Clusters +// environment. +func OracleDBInstanceName(val string) attribute.KeyValue { + return OracleDBInstanceNameKey.String(val) +} + +// OracleDBName returns an attribute KeyValue conforming to the "oracle.db.name" +// semantic conventions. It represents the database name associated with the +// connection. +func OracleDBName(val string) attribute.KeyValue { + return OracleDBNameKey.String(val) +} + +// OracleDBPdb returns an attribute KeyValue conforming to the "oracle.db.pdb" +// semantic conventions. It represents the pluggable database (PDB) name +// associated with the connection. +func OracleDBPdb(val string) attribute.KeyValue { + return OracleDBPdbKey.String(val) +} + +// OracleDBService returns an attribute KeyValue conforming to the +// "oracle.db.service" semantic conventions. It represents the service name +// currently associated with the database connection. +func OracleDBService(val string) attribute.KeyValue { + return OracleDBServiceKey.String(val) +} + +// Namespace: oracle_cloud +const ( + // OracleCloudRealmKey is the attribute Key conforming to the + // "oracle_cloud.realm" semantic conventions. 
It represents the OCI realm + // identifier that indicates the isolated partition in which the tenancy and its + // resources reside. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "oc1", "oc2" + // Note: See [OCI documentation on realms] + // + // [OCI documentation on realms]: https://docs.oracle.com/iaas/Content/General/Concepts/regions.htm + OracleCloudRealmKey = attribute.Key("oracle_cloud.realm") +) + +// OracleCloudRealm returns an attribute KeyValue conforming to the +// "oracle_cloud.realm" semantic conventions. It represents the OCI realm +// identifier that indicates the isolated partition in which the tenancy and its +// resources reside. +func OracleCloudRealm(val string) attribute.KeyValue { + return OracleCloudRealmKey.String(val) +} + // Namespace: os const ( // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic @@ -12423,6 +13019,33 @@ const ( // // Examples: "/bazinga/" PprofProfileKeepFramesKey = attribute.Key("pprof.profile.keep_frames") + + // PprofScopeDefaultSampleTypeKey is the attribute Key conforming to the + // "pprof.scope.default_sample_type" semantic conventions. It represents the + // records the pprof's default_sample_type in the original profile. Not set if + // the default sample type was missing. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu" + // Note: This attribute, if present, MUST be set at the scope level + // (resource_profiles[].scope_profiles[].scope.attributes[]). + PprofScopeDefaultSampleTypeKey = attribute.Key("pprof.scope.default_sample_type") + + // PprofScopeSampleTypeOrderKey is the attribute Key conforming to the + // "pprof.scope.sample_type_order" semantic conventions. It represents the + // records the indexes of the sample types in the original profile. 
+ // + // Type: int[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3, 0, 1, 2 + // Note: This attribute, if present, MUST be set at the scope level + // (resource_profiles[].scope_profiles[].scope.attributes[]). + PprofScopeSampleTypeOrderKey = attribute.Key("pprof.scope.sample_type_order") ) // PprofLocationIsFolded returns an attribute KeyValue conforming to the @@ -12494,6 +13117,21 @@ func PprofProfileKeepFrames(val string) attribute.KeyValue { return PprofProfileKeepFramesKey.String(val) } +// PprofScopeDefaultSampleType returns an attribute KeyValue conforming to the +// "pprof.scope.default_sample_type" semantic conventions. It represents the +// records the pprof's default_sample_type in the original profile. Not set if +// the default sample type was missing. +func PprofScopeDefaultSampleType(val string) attribute.KeyValue { + return PprofScopeDefaultSampleTypeKey.String(val) +} + +// PprofScopeSampleTypeOrder returns an attribute KeyValue conforming to the +// "pprof.scope.sample_type_order" semantic conventions. It represents the +// records the indexes of the sample types in the original profile. +func PprofScopeSampleTypeOrder(val ...int) attribute.KeyValue { + return PprofScopeSampleTypeOrderKey.IntSlice(val) +} + // Namespace: process const ( // ProcessArgsCountKey is the attribute Key conforming to the @@ -13258,59 +13896,13 @@ var ( // Namespace: rpc const ( - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. 
It MUST be calculated as two different counters - // starting from `1` one for sent messages and one for received message.. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" - // semantic conventions. It represents the whether this is a received or sent - // message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic // conventions. It represents the fully-qualified logical name of the method // from the RPC interface perspective. 
// // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "com.example.ExampleService/exampleMethod", "EchoService/Echo", // "_OTHER" @@ -13345,7 +13937,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "com.myservice.EchoService/catchAll", // "com.myservice.EchoService/unknownMethod", "InvalidMethod" @@ -13357,7 +13949,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "OK", "DEADLINE_EXCEEDED", "-32602" // Note: Usually it represents an error code, but may also represent partial @@ -13373,7 +13965,7 @@ const ( // // Type: Enum // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: // Note: The client and server RPC systems may differ for the same RPC @@ -13383,27 +13975,6 @@ const ( RPCSystemNameKey = attribute.Key("rpc.system.name") ) -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" -// semantic conventions. It MUST be calculated as two different counters starting -// from `1` one for sent messages and one for received message.. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. 
-func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the fully-qualified logical name of the // method from the RPC interface perspective. @@ -13441,25 +14012,15 @@ func RPCResponseStatusCode(val string) attribute.KeyValue { return RPCResponseStatusCodeKey.String(val) } -// Enum values for rpc.message.type -var ( - // sent - // Stability: development - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - // Stability: development - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - // Enum values for rpc.system.name var ( // [gRPC] - // Stability: development + // Stability: release_candidate // // [gRPC]: https://grpc.io/ RPCSystemNameGRPC = RPCSystemNameKey.String("grpc") // [Apache Dubbo] - // Stability: development + // Stability: release_candidate // // [Apache Dubbo]: https://dubbo.apache.org/ RPCSystemNameDubbo = RPCSystemNameKey.String("dubbo") @@ -13674,13 +14235,28 @@ func ServerPort(val int) attribute.KeyValue { // Namespace: service const ( + // ServiceCriticalityKey is the attribute Key conforming to the + // "service.criticality" semantic conventions. It represents the operational + // criticality of the service. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "critical", "high", "medium", "low" + // Note: Application developers are encouraged to set `service.criticality` to + // express the operational importance of their services. Telemetry consumers MAY + // use this attribute to optimize telemetry collection or improve user + // experience. + ServiceCriticalityKey = attribute.Key("service.criticality") + // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. 
// // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Stable // // Examples: "627cc493-f310-47de-96bd-71410b7dec09" // Note: MUST be unique for each instance of the same @@ -13754,7 +14330,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Stable // // Examples: "Shop" // Note: A string value having a meaning that helps to distinguish a group of @@ -13856,6 +14432,29 @@ func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } +// Enum values for service.criticality +var ( + // Service is business-critical; downtime directly impacts revenue, user + // experience, or core functionality. + // + // Stability: development + ServiceCriticalityCritical = ServiceCriticalityKey.String("critical") + // Service is important but has degradation tolerance or fallback mechanisms. + // + // Stability: development + ServiceCriticalityHigh = ServiceCriticalityKey.String("high") + // Service provides supplementary functionality; degradation has limited user + // impact. + // + // Stability: development + ServiceCriticalityMedium = ServiceCriticalityKey.String("medium") + // Service is non-essential to core operations; used for background tasks or + // internal tools. + // + // Stability: development + ServiceCriticalityLow = ServiceCriticalityKey.String("low") +) + // Namespace: session const ( // SessionIDKey is the attribute Key conforming to the "session.id" semantic @@ -15175,6 +15774,18 @@ const ( // // This list is subject to change over time. // + // Matching of query parameter keys against the sensitive list SHOULD be + // case-sensitive. + // + // + // Instrumentation MAY provide a way to override this list via declarative + // configuration. + // If so, it SHOULD use the `sensitive_query_parameters` property + // (an array of case-sensitive strings with minimum items 0) under + // `.instrumentation/development.general.sanitization.url`. 
+ // This list is a full override of the default sensitive query parameter keys, + // it is not a list of keys in addition to the defaults. + // // When a query string value is redacted, the query string key SHOULD still be // preserved, e.g. // `https://www.example.com/path?color=blue&sig=REDACTED`. @@ -15250,6 +15861,17 @@ const ( // // This list is subject to change over time. // + // Matching of query parameter keys against the sensitive list SHOULD be + // case-sensitive. + // + // Instrumentation MAY provide a way to override this list via declarative + // configuration. + // If so, it SHOULD use the `sensitive_query_parameters` property + // (an array of case-sensitive strings with minimum items 0) under + // `.instrumentation/development.general.sanitization.url`. + // This list is a full override of the default sensitive query parameter keys, + // it is not a list of keys in addition to the defaults. + // // When a query string value is redacted, the query string key SHOULD still be // preserved, e.g. // `q=OpenTelemetry&sig=REDACTED`. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go similarity index 80% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go index 852362ef7..c5c41e4d2 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.39.0 +// patterns for OpenTelemetry things. This package represents the v1.40.0 // version of the OpenTelemetry semantic conventions. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go similarity index 62% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go index 84cf636a7..6d26e5282 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go @@ -1,9 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" import ( + "errors" "reflect" "go.opentelemetry.io/otel/attribute" @@ -14,12 +15,14 @@ import ( // If err is nil, the returned attribute has the default value // [ErrorTypeOther]. // -// If err's type has the method +// If err or one of the errors in its chain has the method // // ErrorType() string // -// then the returned attribute has the value of err.ErrorType(). Otherwise, the -// returned attribute has a value derived from the concrete type of err. +// the returned attribute has that method's return value. If multiple errors in +// the chain implement this method, the value from the first match found by +// [errors.As] is used. Otherwise, the returned attribute has a value derived +// from the concrete type of err. // // The key of the returned attribute is [ErrorTypeKey]. func ErrorType(err error) attribute.KeyValue { @@ -33,8 +36,15 @@ func ErrorType(err error) attribute.KeyValue { func errorType(err error) string { var s string if et, ok := err.(interface{ ErrorType() string }); ok { - // Prioritize the ErrorType method if available. + // Fast path: check the top-level error first. 
s = et.ErrorType() + } else { + // Fallback: search the error chain for an ErrorType method. + var et interface{ ErrorType() string } + if errors.As(err, &et) { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } } if s == "" { // Fallback to reflection if the ErrorType method is not supported or diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go similarity index 74% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go index 7b688ecc3..6a26231a1 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" const ( // ExceptionEventName is the name of the Span event representing an exception. 
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/httpconv/metric.go similarity index 95% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/httpconv/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/httpconv/metric.go index cb993812a..7264925ba 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/httpconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/httpconv/metric.go @@ -160,7 +160,10 @@ func (m ClientActiveRequests) Add( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) + m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes( + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )) return } @@ -174,7 +177,7 @@ func (m ClientActiveRequests) Add( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("server.address", serverAddress), attribute.Int("server.port", serverPort), )..., @@ -300,7 +303,10 @@ func (m ClientConnectionDuration) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Float64Histogram.Record(ctx, val) + m.Float64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )) return } @@ -314,7 +320,7 @@ func (m ClientConnectionDuration) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("server.address", serverAddress), attribute.Int("server.port", serverPort), )..., @@ -443,7 +449,11 @@ func (m ClientOpenConnections) Add( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) + m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes( + attribute.String("http.connection.state", string(connectionState)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), 
+ )) return } @@ -457,7 +467,7 @@ func (m ClientOpenConnections) Add( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.connection.state", string(connectionState)), attribute.String("server.address", serverAddress), attribute.Int("server.port", serverPort), @@ -592,7 +602,11 @@ func (m ClientRequestBodySize) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64Histogram.Record(ctx, val) + m.Int64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )) return } @@ -606,7 +620,7 @@ func (m ClientRequestBodySize) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.request.method", string(requestMethod)), attribute.String("server.address", serverAddress), attribute.Int("server.port", serverPort), @@ -768,7 +782,11 @@ func (m ClientRequestDuration) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Float64Histogram.Record(ctx, val) + m.Float64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )) return } @@ -782,7 +800,7 @@ func (m ClientRequestDuration) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.request.method", string(requestMethod)), attribute.String("server.address", serverAddress), attribute.Int("server.port", serverPort), @@ -944,7 +962,11 @@ func (m ClientResponseBodySize) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64Histogram.Record(ctx, val) + m.Int64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), 
+ attribute.Int("server.port", serverPort), + )) return } @@ -958,7 +980,7 @@ func (m ClientResponseBodySize) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.request.method", string(requestMethod)), attribute.String("server.address", serverAddress), attribute.Int("server.port", serverPort), @@ -1118,7 +1140,10 @@ func (m ServerActiveRequests) Add( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) + m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )) return } @@ -1132,7 +1157,7 @@ func (m ServerActiveRequests) Add( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.request.method", string(requestMethod)), attribute.String("url.scheme", urlScheme), )..., @@ -1255,7 +1280,10 @@ func (m ServerRequestBodySize) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64Histogram.Record(ctx, val) + m.Int64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )) return } @@ -1269,7 +1297,7 @@ func (m ServerRequestBodySize) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.request.method", string(requestMethod)), attribute.String("url.scheme", urlScheme), )..., @@ -1439,7 +1467,10 @@ func (m ServerRequestDuration) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Float64Histogram.Record(ctx, val) + m.Float64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )) return } @@ -1453,7 +1484,7 @@ func (m ServerRequestDuration) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], 
attribute.String("http.request.method", string(requestMethod)), attribute.String("url.scheme", urlScheme), )..., @@ -1623,7 +1654,10 @@ func (m ServerResponseBodySize) Record( attrs ...attribute.KeyValue, ) { if len(attrs) == 0 { - m.Int64Histogram.Record(ctx, val) + m.Int64Histogram.Record(ctx, val, metric.WithAttributes( + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )) return } @@ -1637,7 +1671,7 @@ func (m ServerResponseBodySize) Record( *o, metric.WithAttributes( append( - attrs, + attrs[:len(attrs):len(attrs)], attribute.String("http.request.method", string(requestMethod)), attribute.String("url.scheme", urlScheme), )..., diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go similarity index 100% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go similarity index 71% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go index e1a199d89..a07ffa336 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.39.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.40.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index 604fdab44..9316fd0ac 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index ee6f4bcb2..e3d103c4b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -12,6 +12,11 @@ const ( // with the sampling bit set means the span is sampled. FlagsSampled = TraceFlags(0x01) + // FlagsRandom is a bitmask with the random trace ID flag set. When + // set, it signals that the trace ID was generated randomly with at + // least 56 bits of randomness (W3C Trace Context Level 2). + FlagsRandom = TraceFlags(0x02) + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" @@ -191,6 +196,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // return tf &^ FlagsSampled } +// IsRandom reports whether the random bit is set in the TraceFlags. +func (tf TraceFlags) IsRandom() bool { + return tf&FlagsRandom == FlagsRandom +} + +// WithRandom sets the random bit in a new copy of the TraceFlags. 
+func (tf TraceFlags) WithRandom(random bool) TraceFlags { // nolint:revive // random is not a control flag. + if random { + return tf | FlagsRandom + } + + return tf &^ FlagsRandom +} + // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { @@ -317,6 +336,11 @@ func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } +// IsRandom reports whether the random bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsRandom() bool { + return sc.traceFlags.IsRandom() +} + // WithTraceFlags returns a new SpanContext with the TraceFlags replaced. func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { return SpanContext{ diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 073adae2f..e9cb3fd4d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -61,7 +61,10 @@ func checkValue(val string) bool { func checkKeyRemain(key string) bool { // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) for _, v := range key { - if isAlphaNum(byte(v)) { + if v > 127 { + return false + } + if isAlphaNumASCII(v) { continue } switch v { @@ -89,7 +92,7 @@ func checkKeyPart(key string, n int) bool { return ret && checkKeyRemain(key[1:]) } -func isAlphaNum(c byte) bool { +func isAlphaNumASCII[T rune | byte](c T) bool { if c >= 'a' && c <= 'z' { return true } @@ -105,7 +108,7 @@ func checkKeyTenant(key string, n int) bool { if key == "" { return false } - return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) + return isAlphaNumASCII(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) } // based on the W3C Trace Context specification diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7c8f50803..1db4f47e4 100644 --- 
a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 9daa2df9d..bcc6ee78a 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.40.0 + version: v1.43.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.62.0 + version: v0.65.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.16.0 + version: v0.19.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,7 +36,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.14 + version: v0.0.16 modules: - go.opentelemetry.io/otel/schema excluded-modules: @@ -64,3 +64,6 @@ modules: go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: version-refs: - ./internal/version.go + go.opentelemetry.io/otel/exporters/stdout/stdoutlog: + version-refs: + - ./internal/version.go diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 1f8d49bc9..304f64763 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -53,6 +53,7 @@ type AnyValue struct { // *AnyValue_ArrayValue // *AnyValue_KvlistValue // *AnyValue_BytesValue + // *AnyValue_StringValueStrindex Value 
isAnyValue_Value `protobuf_oneof:"value"` } @@ -144,6 +145,13 @@ func (x *AnyValue) GetBytesValue() []byte { return nil } +func (x *AnyValue) GetStringValueStrindex() int32 { + if x, ok := x.GetValue().(*AnyValue_StringValueStrindex); ok { + return x.StringValueStrindex + } + return 0 +} + type isAnyValue_Value interface { isAnyValue_Value() } @@ -176,6 +184,20 @@ type AnyValue_BytesValue struct { BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` } +type AnyValue_StringValueStrindex struct { + // Reference to the string value in ProfilesDictionary.string_table. + // + // Note: This is currently used exclusively in the Profiling signal. + // Implementers of OTLP receivers for signals other than Profiling should + // treat the presence of this value as a non-fatal issue. + // Log an error or warning indicating an unexpected field intended for the + // Profiling signal and process the data as if this value were absent or + // empty, ignoring its semantic content for the non-Profiling signal. + // + // Status: [Development] + StringValueStrindex int32 `protobuf:"varint,8,opt,name=string_value_strindex,json=stringValueStrindex,proto3,oneof"` +} + func (*AnyValue_StringValue) isAnyValue_Value() {} func (*AnyValue_BoolValue) isAnyValue_Value() {} @@ -190,6 +212,8 @@ func (*AnyValue_KvlistValue) isAnyValue_Value() {} func (*AnyValue_BytesValue) isAnyValue_Value() {} +func (*AnyValue_StringValueStrindex) isAnyValue_Value() {} + // ArrayValue is a list of AnyValue messages. We need ArrayValue as a message // since oneof in AnyValue does not allow repeated fields. type ArrayValue struct { @@ -306,9 +330,22 @@ type KeyValue struct { unknownFields protoimpl.UnknownFields // The key name of the pair. + // key_ref MUST NOT be set if key is used. Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // The value of the pair. 
Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Reference to the string key in ProfilesDictionary.string_table. + // key MUST NOT be set if key_strindex is used. + // + // Note: This is currently used exclusively in the Profiling signal. + // Implementers of OTLP receivers for signals other than Profiling should + // treat the presence of this key as a non-fatal issue. + // Log an error or warning indicating an unexpected field intended for the + // Profiling signal and process the data as if this value were absent or + // empty, ignoring its semantic content for the non-Profiling signal. + // + // Status: [Development] + KeyStrindex int32 `protobuf:"varint,3,opt,name=key_strindex,json=keyStrindex,proto3" json:"key_strindex,omitempty"` } func (x *KeyValue) Reset() { @@ -357,6 +394,13 @@ func (x *KeyValue) GetValue() *AnyValue { return nil } +func (x *KeyValue) GetKeyStrindex() int32 { + if x != nil { + return x.KeyStrindex + } + return 0 +} + // InstrumentationScope is a message representing the instrumentation scope information // such as the fully qualified name and version. 
type InstrumentationScope struct { @@ -543,7 +587,7 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0x96, 0x03, 0x0a, 0x08, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, @@ -565,52 +609,58 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, - 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, - 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, - 0x06, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, - 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b, - 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, - 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x0d, 0x52, 0x16, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, - 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, - 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, - 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x15, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x08, + 0x20, 0x01, 
0x28, 0x05, 0x48, 0x00, 0x52, 0x13, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x07, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x22, 0x7e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x22, 0xc7, 0x01, 0x0a, 
0x14, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, + 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x4b, + 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -735,6 +785,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { (*AnyValue_ArrayValue)(nil), (*AnyValue_KvlistValue)(nil), (*AnyValue_BytesValue)(nil), + (*AnyValue_StringValueStrindex)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go index b25abe6f6..ab284c0c0 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go @@ -40,7 +40,6 @@ const ( type SeverityNumber int32 const ( - // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. 
SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go index cde9066f6..69461e31d 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -248,10 +248,6 @@ func (m *Manager) TLSConfig() *tls.Config { // If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will // also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler for http-01. func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { - if m.Prompt == nil { - return nil, errors.New("acme/autocert: Manager.Prompt not set") - } - name := hello.ServerName if name == "" { return nil, errors.New("acme/autocert: missing server name") diff --git a/vendor/golang.org/x/crypto/acme/rfc8555.go b/vendor/golang.org/x/crypto/acme/rfc8555.go index 976b27702..1fb110e08 100644 --- a/vendor/golang.org/x/crypto/acme/rfc8555.go +++ b/vendor/golang.org/x/crypto/acme/rfc8555.go @@ -53,6 +53,9 @@ func (c *Client) registerRFC(ctx context.Context, acct *Account, prompt func(tos Contact: acct.Contact, } if c.dir.Terms != "" { + if prompt == nil { + return nil, errors.New("acme: missing Manager.Prompt to accept server's terms of service") + } req.TermsAgreed = prompt(c.dir.Terms) } diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go index 8cbdf3f01..803fe5178 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -6,6 +6,7 @@ package hpack import ( "fmt" + "strings" ) // headerFieldTable implements a list of HeaderFields. @@ -54,10 +55,16 @@ func (t *headerFieldTable) len() int { // addEntry adds a new entry. 
func (t *headerFieldTable) addEntry(f HeaderField) { + // Prevent f from escaping to the heap. + f2 := HeaderField{ + Name: strings.Clone(f.Name), + Value: strings.Clone(f.Value), + Sensitive: f.Sensitive, + } id := uint64(t.len()) + t.evictCount + 1 - t.byName[f.Name] = id - t.byNameValue[pairNameValue{f.Name, f.Value}] = id - t.ents = append(t.ents, f) + t.byName[f2.Name] = id + t.byNameValue[pairNameValue{f2.Name, f2.Value}] = id + t.ents = append(t.ents, f2) } // evictOldest evicts the n oldest entries in the table. diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6320f4eb4..0b99d832f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -4,13 +4,17 @@ // Package http2 implements the HTTP/2 protocol. // -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) +// Almost no users should need to import this package directly. +// The net/http package supports HTTP/2 natively. // -// See https://http2.github.io/ for more information on HTTP/2. +// To enable or disable HTTP/2 support in net/http clients and servers, see +// [http.Transport.Protocols] and [http.Server.Protocols]. +// +// To configure HTTP/2 parameters, see +// [http.Transport.HTTP2] and [http.Server.HTTP2]. +// +// To create HTTP/1 or HTTP/2 connections, see +// [http.Transport.NewClientConn]. 
package http2 // import "golang.org/x/net/http2" import ( diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7ef807f79..65da5175c 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -164,6 +164,8 @@ type Server struct { // NewWriteScheduler constructs a write scheduler for a connection. // If nil, a default scheduler is chosen. + // + // Deprecated: User-provided write schedulers are deprecated. NewWriteScheduler func() WriteScheduler // CountError, if non-nil, is called on HTTP/2 server errors. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 8cf64b78e..19553f10c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -712,19 +712,12 @@ func canRetryError(err error) bool { return true } if se, ok := err.(StreamError); ok { - if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { - // See golang/go#47635, golang/go#42777 - return true - } return se.Code == ErrCodeRefusedStream } return false } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.transportTestHooks != nil { - return t.newClientConn(nil, singleUse, nil) - } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -2865,6 +2858,9 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { var seenMaxConcurrentStreams bool err := f.ForeachSetting(func(s Setting) error { + if err := s.Valid(); err != nil { + return err + } switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val @@ -2896,9 +2892,6 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val case SettingEnableConnectProtocol: - if err := s.Valid(); err != nil { - return err - } // If the peer wants to send us 
SETTINGS_ENABLE_CONNECT_PROTOCOL, // we require that it do so in the first SETTINGS frame. // @@ -3233,10 +3226,6 @@ func (gz *gzipReader) Close() error { return gz.body.Close() } -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 7de27be52..551545f31 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -8,6 +8,8 @@ import "fmt" // WriteScheduler is the interface implemented by HTTP/2 write schedulers. // Methods are never called concurrently. +// +// Deprecated: User-provided write schedulers are deprecated. type WriteScheduler interface { // OpenStream opens a new stream in the write scheduler. // It is illegal to call this with streamID=0 or with a streamID that is @@ -38,6 +40,8 @@ type WriteScheduler interface { } // OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +// +// Deprecated: User-provided write schedulers are deprecated. type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. @@ -47,6 +51,8 @@ type OpenStreamOptions struct { } // FrameWriteRequest is a request to write a frame. +// +// Deprecated: User-provided write schedulers are deprecated. type FrameWriteRequest struct { // write is the interface value that does the writing, once the // WriteScheduler has selected this frame to write. 
The write diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index 7803a9261..c3d3e9bed 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -14,6 +14,8 @@ import ( const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +// +// Deprecated: User-provided write schedulers are deprecated. type PriorityWriteSchedulerConfig struct { // MaxClosedNodesInTree controls the maximum number of closed streams to // retain in the priority tree. Setting this to zero saves a small amount @@ -55,6 +57,9 @@ type PriorityWriteSchedulerConfig struct { // NewPriorityWriteScheduler constructs a WriteScheduler that schedules // frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. // If cfg is nil, default options are used. +// +// Deprecated: The RFC 7540 write scheduler has known bugs and performance issues, +// and RFC 7540 prioritization was deprecated in RFC 9113. func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { return newPriorityWriteSchedulerRFC7540(cfg) } diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index f2e55e05c..d5d4e2214 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -10,6 +10,8 @@ import "math" // priorities. Control frames like SETTINGS and PING are written before DATA // frames, but if no control frames are queued and multiple streams have queued // HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +// +// Deprecated: User-provided write schedulers are deprecated. 
func NewRandomWriteScheduler() WriteScheduler { return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} } diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children new file mode 100644 index 000000000..1f1691308 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/children differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes new file mode 100644 index 000000000..7ac74a1d4 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/nodes differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text new file mode 100644 index 000000000..38f06ea9e --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -0,0 +1 @@ +bonagasukeymachinebondigitaloceanspaces3-website-us-west-1bones3-website-us-west-2boomla1-plenitvedestrandiskstationcillair-traffic-controllagdenesnaaseinet-freaksakurastorageboschristmasakikuchikuseihicampinashikiminohostfoldiskussionsbereicheap-east-2bostik-serverrankoshigayachiyodaklakasamatsudoes-itjmaxxxn--12c1fe0brandisrechtrainingkpmgdbarclays3-fips-us-gov-west-1bostonakijinsekikogentlentapisa-geekarlsoyoriikarmoyoshiokanravoues3-eu-west-3botdashgabadaddjabbottjomelhus-northeast-1bouncemerckmsdsclouditchyouriparsakuratanishiwakinderoyurihonjournalistreaklinksakurawebredirectmelbourneboutiquebecologialaichaugianglassessmentsakyotanabellunoorepairbusanagochigasakishimabarakawagoeboutireserve-onlineboyfriendoftheinternetflixn--12cfi8ixb8lorenskogleezebozen-sudtirolovableprojectjxn--12co0c3b4evalleaostamayukuhashimokitayamaxarnetbankanzakiyosatokorozawap-southeast-7bozen-suedtirolovepopartindevsalangenissandoyusuharazurefdienbienishikatakayamatsushigemrstudio-prodoyolasitequipmentateshinanomachintaifun-dnshome-webservercellillesandefjordietateyamapartments3-ca-central-1bplacedogawarabikomaezakirunord-frontierepbodynathomebuiltwithda
rklangevagrarmeniazurestaticappspaceusercontentproxy9guacuedaeguambulancechireadmyblogoip-dynamica-west-180recipescaracalculatorskeninjambylimanowarudaetnaamesjevuemielnogatabuseating-organicbcg123homepagexlimitedeltaitogliattips3-ap-northeast-3utilitiesmall-websozaibetsubamericanfamilydstcgroupperimo-siemenscaledekadena4ufcfaninohekinanporovnospamproxyokoteatonamidsundeportebetsukubank123kotisivultrobjectselinogradimo-i-ranamizuhobby-siteaches-yogano-ip-ddnsgurugbydgoszczecin-addrammenuorogerscblackbaudcdn-edgestackhero-networkinggroupowiat-band-campaignieznoboribetsubsc-paywhirlimodumemergencymruovatlassian-dev-buildereclaims3-ap-south-12hparasiteasypanelblagrigentobamaceiobbcn-north-123websitebuildersvp4lima-citychyattorneyagawafaicloudinedre-eiker2-deloitteastus2000123webseiteckidsmynascloudfrontendofinternet-dnsnasaarlandds3-ap-northeast-123sitewebcamauction-acornimsite164-balsan-suedtirolillyokosukanoyakage2balsfjorddnss3-accesspoint-fips3-ap-east-123paginawebadorsiteshikagamiishibechambagrice-labss3-123minsidaarborteamsterdamnserverbaniamallamazonwebservices-123miwebaccelastx4432-b-datacenterprisesakievennodebalancernfshostrowwlkpnftstorage123hjemmeside5brasiliadboxosascoli-picenord-odalovesickarpaczest-a-la-maisondre-landivtasvuodnakamurataiwanumatajimidorivnebravendbarefootballangenovarahkkeravjuh-ohtawaramotoineppueblockbusternikkoelnishikatsuragit-repostre-toteneiheijiitatebayashikaoizumizakitchenishikawazukamisatokonamerikawaueu-2bresciaogashimadachicappadovaapstecnologiazurewebsitests3-external-1bridgestonebrindisicilynxn--1ck2e1baremetalvdalipaynow-dnsdojobservablehqhaccaltanissettaikikugawaltervistablogivestbyglandroverhallaakesvuemielecceu-3broadwayusuitarumizusawabroke-itkmaxxn--1ctwolominamatargithubpreviewskrakowebview-assetsalatrobeneventochiokinoshimagentositempurlplfinancialpusercontentksatmalluccalvinklein-brb-hostingliwicebrokereportmpartsalon-1brothercules-developerauniteroirmeteorappartypo3serverevistathellebrumunddaluhanskartuzyuullensvang
uardivttasvuotnakaniikawatanagurabrusselsaloonissayokoshibahikariyalibabacloudcsaltdalukoweddinglobodontexisteingeekaruizawabryanskierniewicebrynebwcloud-os-instancesaludixn--1lqs03nissedaluroyuzawabzhitomirhcloudiyclientozsdegreeclinicapitalonecliniquenoharaclothingdustdatadetectranbycngouv0cnpyatigorskiptveterinaireadymadethis-a-anarchistjordalshalsencntrani-andria-barletta-trani-andriacodespotenzagancoffeedbackanagawarszawashtenawsapprunnerdpoliticaarpharmaciensanjosoyrocommunity-prochowicecomochizukillvivanovoldacompanyantagonistockholmestrandurumisakimobetsumidanangodoesntexistmein-iservschulegallerycomparemarkerryhotelsannancomputercomsecretrosnubargainsureadthedocs-hosteditorxn--0trq7p7nnishimeraugustow-corp-staticblitzgierzgoraktyubinskaunicommuneencoreapiacenzabc01kapp-ionosegawadlugolekaascolipicenocelotennishiawakuracingheannakadomarineat-urlive-oninomiyakonojorpelandeus-canvasitebinatsukigatajiri234condoshiibabybluebitemasekd1conferenceconstruction-vaporcloudplatformshangriladeskjakamaiedge-stagingreaterconsuladobeio-static-accesscamdvrcampaniaconsultantraniandriabarlettatraniandriaconsultingrebedocapooguycontactivetrailwaycontagematsubaracontractorstababymilkashiwaraconvexecute-apictetcieszyncookingretakahatakaishimokawacooperativano-frankivskjervoyagecoprofesionalchikugodaddyn-o-saurealestatefarmerseinecorsicable-modemoneycosenzakopanecosidnsiskinkyowariasahikawasmercouchpotatofriesannoheliohostrodawaracouncil-central-1couponstackitagawassamukawatarikuzentakatairacozoracpservernamegataishinomakiloappsanokashiwazakiyosellsyourhomeftpharmacyonabaruminamiizukaminokawanishiaizubangecqldyndns-at-homedepotaruiocrankycrdyndns-at-workisboringsakershus-central-1creditcardyndns-blogsytecreditunion-webpaaskoyabenogiftsantamariakecremonasharissadistoloseyouriphdfcbankasserversembokutamakiyosunndalcrewp2cricketnedalcrimeast-kazakhstanangercrispmanagercrminamimakinfinitigooglecodebergrimstadyndns-freeboxosloisirsantoandrealtysnesanukinternationalcrotonecrowniphilips
aobernardovre-eikercrsaogoncanthoboleslawiecommerce-shopitsitecruisesaotomeldalcryptonomichiharacuiabacgiangiangrycuisinellahppictureshinordeste-idclkasukabeatsardegnarvikasumigaurayasudacuneocuritibackdropalermoarekembuchikumagayagawakkanaikawachinaganoharamcoacharitydalaheadjuegoshikibichuocutegirlfriendyndns-homednsardiniafedoraproject-studynaliasnesoddeno-stagingroks-thisayamanobearalvahkijoburgrayjayleagueschokokekscholarshipschoolbusinessebytomaridagawalmartransiphotographysiofeirafembetsukuintuitranslatefermockaszubytemarketingvollferraraferrarinuyamashinazawaferreroticahcesuolohmusashimurayamaizurunschuldockatowicefetsundyndns-remotewdyndns-iphonefossarlfgrongrossetouchijiwadediboxn--2m4a15efhvalerfilegear-sg-1filminamioguni5finalfinancefinnoyfirebaseapplinzinvestmentschulplattforminamisanrikubetsupersalevangerfirenetlibp2phutholdingsmartlabelingroundhandlingroznysaikisosakitahatakamatsukawafirenzefirestonefirmdaleilaocairtelebitbucketrzynh-servebeero-stageiseiroutingthecloudyndns-serverisignfishingokaseljeephuyenfitjarfitnessettsurugiminamitanefjalerflesbergrphxn--2scrj9caravanylvenetoeidsvollutrausercontentoyotsukaidownloadnpassenger-associationl-ams-1flickragerotikagaminordlandyndns-webhareidsbergriwataraindropikeflierneflirflogintohmalopolskanitransportefloppymntransurlfloraclegovcloudappschulserverflorencefloripadualstackatsushikabeautypedreamhosterschwarzgwesleyfloristanohatakahamalselveruminamiuonumatrixn--30rr7yflororoscrapper-sitefltrapanikolaeventscrappingrueflutterflowest1-us1-plenitravelersinsuranceflyfncarbonia-iglesias-carboniaiglesiascarboniafndyndns-wikindlegnicagliaricoharulezajskierval-d-aosta-valleyfoolfor-ourfor-somedusajscryptedyndns-worksarufutsunomiyawakasaikaitakokamikoaniikappudopaaskvolloanswatchesasayamattelemarkhangelskasuyakumodsasebofagefor-theaterfordeatnuniversitysvardoforexrotheshopwarezzoforgotdnscrysecuritytacticscwesteuropencraftravinhlonganforli-cesena-forlicesenaforlifestyleirfjordyndns1forsalesforceforsandasuolojcloud-v
er-jpcargoboavistanbulsan-sudtirolutskarumaifminamifuranofortalfosneservehttpbincheonfotrdynnsassarintlon-2foxn--32vp30hachinoheavyfozfr-par-1fr-par-2franalytics-gatewayfredrikstadynservebbsaudafreedesktopazimuthaibinhphuocprapidynuddnsfreebox-osauheradyndns-mailovecollegefantasyleaguefreemyiphostyhostinguidedyn-berlincolnfreesitefreetlservehumourfreightrentin-sudtirolfrenchkisshikirkeneserveircarrdrayddns-ipatriafresenius-central-2friuli-v-giuliarafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganserveminecraftrentin-sued-tirolfrognfrolandynuhosting-clusterfrom-akamaiorigin-staginguitarservemp3from-alfrom-arfrom-azureedgekey-stagingujaratmetacentrumbriafrom-callyfrom-cockpitrentin-suedtirolfrom-ctrentino-a-adigefrom-dcasacampinagrandebulsan-suedtiroluxenonconnectoyourafrom-debianfrom-flatangerfrom-gamvikatsuyamashikizunokuniminamiashigarafrom-hidnservep2pimientakazakinzais-a-bruinsfanfrom-iafrom-idynv6from-ilfrom-in-the-bandairtrafficplexus-2from-kservepicservequakefrom-kyfrom-lamericanexpresseljordyroyrvikingroceryfrom-malvikaufentigerfrom-mdfrom-meetrentino-aadigefrom-mifunefrom-mnfrom-modalenfrom-mservesarcasmolaquilarvikautokeinotionfrom-mtlservicebuskerudfrom-ncasertainairflowersalvadorfrom-ndfrom-nefrom-nhlfanfrom-njsevastopolitiendafrom-nminamiyamashirokawanabeepsongdalenviknagaraholtaleniwaizumiotsurugashimagazinefrom-nvalled-aostaobaolbia-tempio-olbiatempioolbialowiezachpomorskiengiangujohanamakinoharafrom-nyatomigrationidfrom-ohdancefrom-okegawatsonionjukujitawarafrom-orfrom-palmasfjordenfrom-praxihuanfrom-ris-a-bulls-fanfrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--3bst00minanofrom-utsiracusagamiharafrom-val-daostavalleyfrom-vtrentino-alto-adigefrom-wafrom-wiardwebspace-hostorachampionshiptodayfrom-wvalledaostargetrentino-altoadigefrom-wyfrosinonefrostalowa-wolawafroyal-commissionporterfruskydivingulenfujiiderafujika
waguchikonefujiminokamoenais-a-candidatefujinomiyadatsunanjoetsulublindesnesevenassieradzfujiokazakirovogradoyfujisatoshoesewestus2fujisawafujishiroishidakabiratoridecafederation-ranchernigovallee-aosteroyfujitsuruokagoshimamurogawafujiyoshidattorelayfukayabeagleboardfukuchiyamadattoweberlevagangaviikanonjis-a-catererfukudomigawafukuis-a-celticsfanfukumitsubishigakiryuohkurafukuokakamigaharafukuroishikariwakunigamihamadavvenjargalsacefukusakisarazure-apigeefukuyamagatakaharunjargaularavellinodeobjectstoragefunabashiriuchinadavvesiidaknongunmaoris-a-chefarsundyndns-office-on-the-webflowtest-iservebloginlinefunagatakahashimamakishiwadazaifudaigoguovdageaidnunusualpersonfunahashikamiamakusatsumasendaisenergyeonggildeskaliszfundfunkfeuerfunnelsexyfuoiskujukuriyamandalfuosskodjeezfurubirafurudonordre-landfurukawaiishoppingushikamifuranore-og-uvdalfusodegaurafussagemakerfutabayamaguchinomihachimanagementrentino-s-tirolfutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinais-a-conservativefsnoasakakinokiafuturecmsheezyfuturehostingxn--3ds443gzfuturemailingfvghakonehakubaclieu-1hakuis-a-cpaneliv-dnshimosuwalkis-a-cubicle-slaveroykenhakusandnessjoenhaldenhalfmoonscaleforcehalsaitamatsukuris-a-democratrentino-stirolham-radio-opocznortonkotsumomodelscapetownnews-staginghamburghammarfeastasiahamurakamigoris-a-designerhanamigawahanawahandahandcraftedugit-pages-researchedmarketplacehangglidinghangoutrentino-sud-tirolhannannestadhannoshiroomghanoipinbrowsersafetymarketshimotsukehanyuzenhappoumuginowaniihamatamakawajimangolffanshimotsumayfirstreamlitappinkddiamondshinichinanhasamazoncognito-idpdnshinjotelulucaniahasaminami-alpshinjukuleuvenicehashbanghasudahasura-appinokofuefukihaborovigoldpoint2thisamitsukehasvikfh-muensterhatenablogisticsxn--3e0b707ehatenadiaryhatinhachiojiyachtshellhatogayahabacninhbinhdinhktrentino-sudtirolhatoyamazakitakamiizumisanofidongthapmircloudnsupdaterhatsukaichikawamisatohokkaidonnakanotoddenhattfjelldalhayashimamotobusellfyis-a-doctoruncontainershin
kamigotourshinshinotsupplyhazuminobushibuyahikobearblogsiteleaf-south-1helpgfoggiahelsinkitakatakanabeardubaioirasebastopoleapcellclstagehirnhemneshinshirohemsedalhepforgeblockshintokushimaheroyhetemlbfanheyflowhoswholidayhigashiagatsumagoianiahigashichichibuzentsujiiehigashihiroshimanehigashiizumozakitakyushunantankhakassiahigashikagawahigashikagurasoedahigashikawakitaaikitamiharunzenhigashikurumegurownproviderhigashimatsushimarcherkasykkylvenneslaskerrypropertieshintomikasaharahigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshinyoshitomiokamishihorohigashinarusells-for-lesshiojirishirifujiedahigashinehigashiomitamamurausukitamotosumy-routerhigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitanakagusukumodenaklodzkobierzycehigashitsunotairesindevicenzamamihokksundhigashiurawa-mazowszexposeducationhercules-appioneerhigashiyamatokoriyamanashijonawatehigashiyodogawahigashiyoshinogaris-a-financialadvisor-aurdalhiphoplixn--3hcrj9cashorokanaiehippythonanywherealtorhiraizumisatokaizukakudamatsuehirakatashinagawahiranais-a-fullstackharkivallee-d-aostehirarahiratsukagawahirayahoooshikamagayaitakaokalmykiahitachiomiyakehitachiotaketakarazukaluganskharkovalleeaostehitradinghjartdalhjelmelandholyhomegoodshioyaltaketomisatoyakokonoehomeipippugliahomelinuxn--3pxu8khersonyhomesecuritymacaparecidahomesecuritypccwuozuerichardliguriahomesenseeringhomeskleppivohostinghomeunixn--41ahondahonjyoitakasagonohejis-a-geekhmelnitskiyamashikokuchuohornindalhorsells-for-usgovcloudapilottotalhortenkawahospitalhotelwithflightshirahamatonbetsupportrentino-sued-tirolhotmailhoyangerhoylandetakasakitashiobarahrsnillfjordhungyenhurdalhurumajis-a-goodyearhyllestadhyogoris-a-greenhypernodessaitokamachippubetsuikitaurahyugawarahyundaiwafuneis-not-certifiedis-savedis-slickhplayitrentinos-tirolis-uberleetrentinostirolis-very-badis-very-evillasalleitungsenis-very-goodis-very-niceis-very-sweetpepperugiais-with-thebandoomdnshisuifuettertdasnetzisk01isk02jenv-aru
bahcavuotnagahamaroygardengerdalp1jeonnamsosnowiecateringebumbleshrimperiajetztrentinosud-tiroljevnakerjewelryjlljls-sto1jls-sto2jls-sto365jmpiwatejnjdfirmalborkdaljouwwebhoptokigawajoyokaichibahccavuotnagaivuotnagaokakyotambabia-goraclecloudappssejny-2jozis-a-knightpointtokashikiwakuratejpmorgangwonjpncatfoodrivelandrobakamaihd-stagingloomy-gatewayjprshitaramakoseis-a-libertariankosherokuappizzakoshimizumakis-a-linux-useranishiaritabashikshacknetlifylkesbiblackfridaynightrentino-suedtirolkoshugheshizuokamitsuekosugekotohiradomainshoujis-a-llamarugame-hostrowieconomiasadogadobeioruntimedicinakanojogaszkolamdongnairlineedleasingkotourakouhokumakogenkounosunnydaykouyamassa-carrara-massacarraramassabuzzkouzushimassivegridkozagawakozakis-a-musiciankozowienkppspbarsycenterprisecloudbeesusercontentaveusercontentawktoyonakagyokutoyonezawauiusercontentdllive-websitebizenakasatsunairportashkentatamotors3-deprecatedgcaffeinehimejibxos3-eu-central-1krasnikahokutokyotangopensocialkrasnodarkredumbrellapykrelliankristiansandcatshowakristiansundkrodsheradkrokstadelvaldaostaticsigdalkropyvnytskyis-a-nascarfankrymisasaguris-a-nursells-itrentinoa-adigekumamotoyamasudakumanowtvaomoriguchiharag-cloud-charternopilawakayamafeloabatochigiehtavuoatnabudejjurkumatorinokumejimatlabgkumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-painterhostsolutionshiranukamisunagawakunitachiaraisaijolsterkunitomigusukukis-a-patsfankunneppubtlsiiitesilknx-serversicherungkuokgroupkomatsushimasoykurgankurobeebyteappenginekurogiminamiawajikis-a-personaltrainerkuroisoftwarendalenugkuromatsunais-a-photographermesserlikescandypoppdalkuronkurotakikawasakis-a-playershiftrentinoaadigekushirogawakustanais-a-republicanonoichinosekigaharakusupabaseoullensakerkutchanelkutnokuzumakis-a-rockstarachowicekvafjordkvalsundkvamfamplifyappchizipifony-1kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsimple-urlmktgorymmvareservdmoliserniamombetsuppliesimplesitemonza-brianzapposirdalmonza-e-della-brianzaptomob
egetmyipirangallocustomer-ocienciamonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenarashinoharamoriyamatsumotofukemoriyoshiminamibosogndalmormonstermoroyamatsunomortgagemoscowiiheyaizuwakamatsubushikusakadogawamoseushimoichikuzenmosjoenmoskenesiskomaganemosslingmotegirlymoviemovimientonsbergmtnmtranaritakurashikis-a-socialistordalmuikaminoyamaxunison-serviceslupskomforbarrell-of-knowledgeu-central-2mukodairamunakatanemuosattemupl-wawsappspacehostedpicardmurmanskommunalforbundmurotorcraftrentinosued-tirolmusashinodesakatakatsukis-a-soxfanmuseumisawamusicampobassociateslzmutsuzawamutualmyactivedirectorymyaddrangedalmyamazeplaystation-cloudyclustersmushcdn77-sslgbtrentinosuedtirolmyasustor-elvdalmycloudnasushiobaramydattolocalcertificationmydbservermyddnskingmydissentrentinsud-tirolmydnsokamogawamydobissmarterthanyousrcfdmydsokndalmyeffectrentinsudtirolmyfastly-edgemyfirewalledreplittlestargardmyforumisconfusedmyfritzmyftpaccessolardalmyhome-servermyjinomykolaivencloud66mymailermymediapcatholicp1mynetnamegawamyokohamamatsudamypeplatter-applcube-serversusakis-a-studentalmypetsolundbeckommunemyphotoshibalena-devicesomamypigboatsomnaturalmypsxn--45br5cylmyrdbxn--45brj9caxiaskimitsubatamicrolightingloppennemysecuritycamerakermyshopblocksoowilliamhillmyshopifymyspreadshopselectrentinsued-tirolmysynologyeongnamdinhs-heilbronnoysundmytabitordermythic-beastsopotrentinsuedtirolmytis-a-bloggermytuleap-partnersor-odalmyvnchernovtsydneymywiredbladehostingpodhalepodlasiellakdnepropetrovskanlandpodzonepohlpoivronpokerpokrovskomonotteroypolkowicepoltavalle-aostavangerpolyspacepomorzeszowinbarsyonlinexus-3ponpesaro-urbino-pesarourbinopesaromasvuotnarusawapordenonepornporsangerporsangugeporsgrunnanpoznanprdprereleaserveftplockerprgmrprimeteleportrentoyookanazawaprincipenzaprivatelinkyard-cloudletsor-varangerprivatizehealthinsuranceprogressivegarsheiyufueliv-apiemontepromoldefinimaringatlangsondriobranconakamai-stagingpropertysfjordprotectionprotonettrevisohuissier-justiceprude
ntialpruszkowindowsservegame-serverprvcyou2-localtonetroandindependent-inquest-a-la-masionprvwineprzeworskogpunyukis-a-teacherkassyncloudpupulawypussycatanzarowinnersorfoldpvhachirogatakamoriokakegawapvtrogstadpwchiryukyuragifuchungbukharavennakaiwanairforceopzqotoyohashimotottoris-a-techietis-a-gurusgovcloudappnodeartheworkpcasinorddaluxuryqponiatowadaqsldqualifioapplumbingotembaixadaqualyhqpartnerqualyhqportalquangngais-a-therapistoiaquangninhthuanquangtritonoshonais-an-accountantshiraois-a-hard-workershirakolobrzegersundojin-dslattuminisitequickconnectroitskomorotsukamiminequicksytesorocabalestrandabergamobaragusabaerobaticketsorreisahayakawakamiichinomiyagitbookinghosteurovisionrenderquipelementsortlandquizzesorumishimatsumaebashimogosenqzzventurestaurantulaspeziavestfoldvestnesquaresinstagingvestre-slidrecifedexperts-comptablesrhtrustkaneyamazoevestre-totenris-an-anarchistorfjordvestvagoyvevelstadvfsrlvibo-valentiavibovalentiavideovinhphuchonanbungotakadaptableclercaobanglogowegroweiboliviajessheimmobilienisshingucciminamiechizeniyodogawavinnicanva-hosted-embedzin-buttervinnytsiavipsinaapplurinacionalvirginankokubunjis-an-artistorjdevcloudjiffyresdalvirtual-uservecounterstrikevirtualservervirtualuserveexchangevisakuholeckochikushinonsenasakuchinotsuchiurakawaviterboknowsitallvivianvivoryvixn--4dbgdty6choseikarugallupfizervkis-an-engineeringvlaanderenvladikavkazimierz-dolnyvladimirennesoyvlogvmitoyoakevolvologdanskonskowolayangroupixolinodeusercontentrentinosudtirolvolyngdalvoorlopervossevangenvotevotingvotoyosatoyonovpnplus-west-3vps-hostrynvusercontentunespritesoundcastripperwithgoogleapiszwithyoutubentrendhostingwiwatsukiyonotebook-fipstuff-4-salewixsitewixstudio-fipstufftoread-booksnesowawjgorawkzwloclawekonsulatinowruzhgorodwmcloudwmeloywmflabsurveyspectrumisugitolgap-north-1wnextdirectwpdevcloudwoodsideliveryworldworse-thanhphohochiminhackerwowiosrvrlessourcecraftromsakegawawpenginepoweredwphostedmailwpmucdn77-storagencywpmudevinappsusonowpsquaredwroclawsg
lobalacceleratorahimeshimagine-proxywtcp4wtfastly-terrariuminamiminowawwwitdkontogurawzmiuwajimaxn--54b7fta0cchoshichikashukudoyamalatvuopmicrosoftbankasaokamikitayamatsurindigenamsskoganeindustriaxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49chowderxn--5rtq34konyvelolipopmckinseyxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264choyodobashichinohealthcareersame-previeweirxn--80aaa0cvacationsuzakarpattiaaxn--80adxhksuzukananiimilanoticiassurgerydxn--80ao21axn--80aqecdr1axn--80asehdbasicserver-on-k3s3-me-south-1xn--80aswgxn--80audiopsysuzukis-an-actorxn--8dbq2axn--8ltr62koobindalxn--8pvr4uzhhorodxn--8y0a063axn--90a1affinitylotterybnikeeneticp0xn--90a3academiamibubbleappspotagerxn--90aeroportsinfolkebibleangaviikafjordpabianicentralus-1xn--90aishobaraoxn--90amcprequalifymeiwamizawaxn--90azhytomyradweblikes-piedmontunkoninfernovecorespeedpartnerxn--9dbq2axn--9et52uzsprytromsojampanasonichitachinakagawarmiastaplesame-appaviaxn--9krt00axn--9tfkyxn--andy-iraxn--aroport-byamembersvalbarduponthewifidelitypeformitourismilexn--asky-iraxn--aurskog-hland-jnbasilicataniaukraanghkeisenebakkeshibukawakeliwebhostingdyniakunemurorangecloudscalebookonlineustarostwodzislawdev-myqnapcloudflarecn-northwest-1xn--avery-yuasakuragawaxn--b-5gausdalxn--b4w605ferdxn--balsan-sdtirol-nsbasketballfinanzjaworznoticeableksvikapsiciliaurland-4-salernombrendlyngenflfanpachihayaakasakawaharaffleentrycloudflare-ipfstgstageorgeorgiap-southeast-4xn--bck1b9a5dre4chrome-central-1xn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2hosted-by-previderxn--bjddar-ptarnobrzegxn--blt-elabkhaziaxn--bmlo-grafana-developmentunnelmolexn--bod-2naturbruksgymnxn--bozen-sdtirol-2obihirosakikamijimatsuzakis-an-entertainerxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagespeedmobilizeropschaefflerxn--brum-voagaturindalxn--btsfjord-9zaxn--bulsan-sdtirol-nsbatsfjordigickaracolog
neu-south-1xn--c1avgxn--c2br7gxn--c3s14mittwaldserverxn--cck2b3bauhauspostman-echofunatoriginstitutemp-dns3-object-lambda-urlolitapunkaragandaurskog-holandinggff5xn--cckwcxetdxn--cesena-forl-mcbnpparibashkiriaxn--cesenaforl-i8axn--cg4bkis-byklecznagatoromskoguchilloutsystemscloudsitevaksdalxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--czr694beppublic-inquiryonagoyaustevollivingitlabbvieeemfakefurniturealtimedio-campidano-mediocampidanomediobninsk8s3-eu-north-1xn--czrs0t0xn--czru2dxn--d1acj3beskidyn-ip24xn--d1alfastlylbarrel-of-knowledgesuite-stagingivingjemnes3-globalatinabelementorayomitanobservereggio-emilia-romagnarutoolsztynsetatsunofficialivornomniwebspaceconfigma-governmentattoolforgeu-4xn--d1aturystykanieruchomoscientistreakusercontentrvarggatrysiljanewayxn--d5qv7z876chungnamdalseidfjordrrppgwangjulvikashibatakatorindustriesteinkjerxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kooris-a-lawyerxn--dnna-graingerxn--drbak-wuaxn--dyry-iraxn--e1a4churchateblobanazawanggoupilefrakkestadtvsamegawaxn--eckvdtc9dxn--efvn9svchitosetogakushimotoganexn--efvy88hadanorth-kazakhstanxn--ehqz56nxn--elqq16hadselbuyshouseshimonitayanagitappwritesthisblogdnsfor-better-thanhhoamishirasatohnoshookuwanakatsugawaxn--eveni-0qa01gaxn--f6qx53axn--fct429kopervikmpspawnbaseminexn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbciprianiigataipeigersundtwhitesnowflakeyword-onfabricafjsamnangerxn--fiq64bestbuyshoparenagareyamagicpatternsapporokunohealth-carereformemorialombardiademergentagents3-sa-east-1xn--fiqs8sveioxn--fiqz9svelvikongsvingerxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbremangerxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grajewolterskluwerxn--frna-woarais-certifiedxn--frya-hraxn--fzc2c9e2circleaninglugsjcbgmbhartinnxn--fzys8d69uvgmailxn--g2xx48ciscofreakadnsaliases121xn--gckr3f0fastvps-serveronakatombetsumitakagiizeaburxn--gecrj9cistrondheiminamiiseharaxn--ggaviika-8ya47haebaruericssonlanxesshimonosekikawaxn--gildeskl-g0axn--givuotna-8yanagawaxn--gjvik-wuaxn--gk3at1exn--gls-e
lacaixaxn--gmq050is-coolblogspotrentinoalto-adigexn--gmqw5axn--gnstigbestellen-zvbetaharanzanquangnamasteigenkainanaejrietiengiangjerdrumemsetaxiijimarnardalombardynamisches-dns3-us-east-2xn--gnstigliefern-wobiraxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3evenesvn-reposphinxn--45q11cooldns-cloudflareglobalashovhackclubartowhmincommbankazoxn--h2brj9c8citadelhichisoctrangminakamichikaiseiyoichipsamparaglidingmodellingmx-central-1xn--h3cuzk1dielddanuorrittogojomediatechnologyeongbukoryokamikawanehonbetsuwanouchikuhokuryugasakis-a-liberalxn--hbmer-xqaxn--hcesuolo-7ya35bhzc66xn--hebda8bialystokkepnord-aurdalwaysdatabase44-sandboxfuseekarasjohkameyamatotakadaustrheimbamblebtimnetzgorzeleccocottemprendealstahaugesundereggio-calabriap-southeast-5xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-fleeklogesquare7xn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyanaizuxn--io0a7is-foundationxn--j1adpmnxn--j1aefauskedsmokorsetagayaseralingenoaiusercontentranoyxn--j1ael8bielawalbrzychaselfiparliamentayninhachijoinmcdireggiocalabriauth-fipsiqcxjavald-aostatichostreak-linkanumazuryokozempresashibetsukumiyamagasakinkobayashimofusagaeroclubmedecin-berlindasdaejeonbuk0emmafann-arborlanddl-o-g-i-nayoro0o0g0xn--j1amhagakhanhhoabinhduongxn--j6w193gxn--jlq480n2rgxn--jlster-byandexcloudxn--jrpeland-54axn--jvr189miuraxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--4dbrk0cexn--koluokta-7ya57hagebostadxn--kprw13dxn--kpry57dxn--kput3is-gonexn--krager-gyaotsurnadalxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jejusgovtrafficmanagerxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasakaiminatoyotap-southeast-3xn--kvnangen-k0axn--l-1fairwindsurfbsbxn--1qqw23axn--l1accentureklamborghinikonantoshimatsusakahoginozawaonsennanmokurennebunkyonanaoshimamateramochausercontentuscanyxn--laheadju-7yasugithubusercon
tentushungryxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52biella-speziauthgear-stagingitpagemrappui-productions3-eu-west-1xn--lesund-huaxn--lgbbat1ad8jelasticbeanstalklabudhabikinokawabajddarvanedgecompute-1xn--lgrd-poacctfcloudflareanycastdlibestadultuvalle-daostakkomakis-an-actresshiraokamitondabayashiogamagoriziaxn--lhppi-xqaxn--linds-pratoyotomiyazakis-into-animeinforumzxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaciticurus-4xn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddeswidnicanva-appspjelkavikomvuxn--42c2d9axn--mgb9awbfbx-osaveincloudyndns-picsbsarpsborgripeeweeklylotteryxn--mgba3a3ejtuxfamilyxn--mgba3a4f16axn--mgba3a4fra1-dell-ogliastrapiappleyxn--mgba7c0bbn0axn--mgbaam7a8haibarakitahiroshimap-south-2xn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00bielskoczow-credentialless-staticblitzlgjerstadiscordsays3-us-gov-east-1xn--mgbai9azgqp6jelenia-goraxn--mgbayh7gparallelxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexperimentswidnikitagatakinouexn--mgbpl2fhskosaigawaxn--mgbqly7c0a67fbcivilaviation-riopretogitsulidluyaniizaporizhzhiaxn--mgbqly7cvafricanvacode-builder-stg-builderxn--mgbt3dhdxn--mgbtf8fldrvaroyxn--mgbtx2bieszczadygeyachimataijiiyamanouchikujoinvilleirvikarasjoketokuyamarumorimachidauthgearapps-1and1xn--mgbx4cd0abogadobeaemcloud-ip6xn--mix082fbxosaves-the-whalessandria-trani-barletta-andriatranibarlettaandriaxn--mix891fedjeducatorprojectransfer-webapp-fipsavonatalxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44clanbibaiduckdnsamsclubin-vpndnsamsungotsukisofukushimaniwamannordreisa-hockeynutwentertainmentoystre-slidrettozawaxn--mkru45is-into-carshiratakahagiangxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-into-cartoonshishikuis-a-hunterxn--mosjen-eyasuokanmakiyokawaraxn--mot-tlavangenxn--mre-og-romsdal-qqbuserveboltuyenquangbinhthuanxn--msy-ula0haiduongxn--mtta-vrjjat-k7aflakstadaokayamazonaws
-cloud9xn--muost-0qaxn--mxtq1miyazure-mobilexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4gbriminiserverxn--nit225kosakaerodromegadgets-itcouldbeworfashionstorebaseballooningroks-theatrentin-sud-tirolxn--nmesjevuemie-tcbalsan-sudtirolkuszczytnoopstmnxn--nnx388axn--nodellogliastraderxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsienaharimakeupsunappgafanxn--o3cw4haiphongonnakayamangyshlakamaized-stagingxn--o3cyx2axn--od0algardxn--od0aq3bievathletajimabaria-vungtaudibleborkangereggioemiliaromagnarviikamiokameokamakurazakiwielunnerehabmereisenishinomiyashironomurauthordalandroidgnishiizunazukifr-1xn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyatsukannamimatakasugais-into-gamessinaplesknshisognexn--osyro-wuaxn--otu796dxn--p1acfolkswiebodzindependent-commissionxn--p1ais-leetrentinoaltoadigexn--pgbs0dhlxn--4it168dxn--porsgu-sta26fedorainfracloudfunctionsaxoxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cldmail-boxn--1lqs71durbanamexnetgamersandvikcoromantovalle-d-aostavernxn--qcka1pmclerkstagexn--qqqt11miyotamanoxn--qxa6axn--qxamjondalenxn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-localplayerxn--rennesy-v1axn--rhkkervju-01afedorapeopleikangerxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5navigationxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byatsushiroxn--rny31hair-surveillancexn--rovu88bifukagawalesundiscordsezpisdnipropetrovskypecorindependent-paneliv-cdn77-securealmesswithdns3-us-gov-west-1xn--rros-granvindafjordxn--rskog-uuaxn--rst-0navois-lostrolekamaishimodatexn--rsta-framercanvaswinoujsciencexn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawaraxn--s-1faithainguyenxn--s9brj9clever-clouderavpagexn--sandnessjen-ogbizxn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphicswisspockongsbergxn--skierv-utazurecontainerimakanegasakis-not-axn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navuotnaroyxn--slt-elabrdns-dynamic-dnsabruz
zombieidskogasawarackmazerbaijan-mayenbaidarchitectestingrok-freeddnsgeekgalaxyzxn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbigv-infolldalomodxn--11b4c3discountry-snowplowiczeladzw-staticblitzxn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbiharstadotsubetsugaruhr-uni-bochumsochimkenthickarasuyamashikeu-south-2xn--srfold-byawatahamaxn--srreisa-q1axn--srum-gratis-a-bookkeepermarriottwmailxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbihoronobeokagakikiraraumaintenanceu1-plenittedalomzaporizhzhegurindependent-review3s3-us-west-1xn--stre-toten-zcbikedaemongolianishinoomotegoismailillehammerfeste-iparmatta-varjjathruherebungoonomutazas3-us-west-2xn--t60b56axn--tckwebthingsxn--tiq49xqyjellybeanxn--tjme-hraxn--tn0agrondarqtxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbioxn--trentin-sdtirol-7vbirkenesoddtangentapps3-website-ap-northeast-1xn--trentino-sd-tirol-c3bittermezproxyonagunicloudiscourses3-website-ap-southeast-1xn--trentino-sdtirol-szbjerkreimdbarcelonagawakuyabukihokuizumocha-sandboxmitakeharaudnedalnishigorlicebinordkapparisor-fronishiharakrehamnishiazaibradescotaribeiraogakicks-assncf-ipfs3-ap-southeast-2ixboxeroxajuniperecreationirasakibigawaknoluoktachikawafflecellpagest-mon-blogueurodirumaceratagajobojibmdeuxfleurs3-ap-southeast-1337xn--trentinosd-tirol-rzbjugnishinoshimatsuurautoscanaryggeemrnotebooks-prodeobservableusercontentatarantoyokawap-southeast-6116-bambinagisobetsuldalpha-myqnapcloudaccess3-ap-northeast-2038xn--trentinosdtirol-7vbloombergentingjesdalondonetskaratsuginamikatagamimozaokinawashirosatobishimadridvagsoyereithuathienhueusc-de-east-1xn--trentinsd-tirol-6vblushakotanishiokoppegardiscoverdalondrinapolicevervaultjeldsundisharparochernihivgubarclaycards3-fips-us-gov-east-1xn--trentinsdtirol-nsbmoattachments3-website-ap-southeast-2xn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvegaspydebergxn--uc0ay4axn--uist22hakatanorthflankazunotogawaxn--uisz3gxn--un
jrga-rtarpitxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtxn--valle-d-aoste-ehboehringerikerxn--valleaoste-e7axn--valledaoste-ebbvadsoccerxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbms3-website-eu-west-1xn--vestvgy-ixa6oxn--vg-yiabmwcloudnonproddagestangevje-og-hornnes3-website-sa-east-1xn--vgan-qoaxn--vgsy-qoa0j0xn--vgu402cleverappsangotpantheonsitexn--vhquvelvetuckerxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bnrweatherchannelsdvrdns3-website-us-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1clickrisinglesjaguarvodkafkashiharaxn--wgbl6axn--xhq521bolognagasakikonaircraftraeumtgeradealerdalcest-le-patron-forgerockyotobetsucks3-website-us-gov-west-1xn--xkc2al3hye2axn--xkc2dl3a5ee0hakodatexn--y9a3aquarellebesbyencowayxn--yer-znavyxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4it797kontumintshizukuishimojis-a-landscaperspectakashimarshallstatebankhmelnytskyivalleedaostexn--ystre-slidre-ujbolzano-altoadigextraspace-to-rentalstomakomaibaravocats3-eu-west-2xn--zbx025dxn--zf0avxn--4pvxs4allxn--zfr164bomlodingenishitosashimizunaminamidaitomanaustdalopparachutingjovikareliancexnbayernxtooldevicexz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000..7ab8b3cf1 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,210 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. 
+// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". +// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". +// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). +// +// All of these domains have the same eTLD+1: +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// +// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". +// +// There is no closed form algorithm to calculate the eTLD of a domain. +// Instead, the calculation is data driven. This package provides a +// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at +// https://publicsuffix.org/ +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "net/netip" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. 
+var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is either a +// privately managed domain (and in practice, not a top level domain) or an +// unmanaged top level domain (and not explicitly mentioned in the +// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN +// domains, "foo.dyndns.org" is a private domain and +// "cromulent" is an unmanaged top level domain. +// +// Use cases for distinguishing ICANN domains like "foo.com" from private +// domains like "foo.appspot.com" can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + if _, err := netip.ParseAddr(domain); err == nil { + return domain, false + } + + lo, hi := uint32(0), uint32(numTLD) + s, suffix, icannNode, wildcard := domain, len(domain), false, false +loop: + for { + dot := strings.LastIndexByte(s, '.') + if wildcard { + icann = icannNode + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength)) + icannNode = u&(1<>= nodesBitsICANN + u = children.get(u & (1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1<(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git 
a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 6d8eb784b..5fc09e293 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -44,14 +44,11 @@ func initOptions() { } func archInit() { - switch runtime.GOOS { - case "freebsd": + if runtime.GOOS == "freebsd" { readARM64Registers() - case "linux", "netbsd", "openbsd", "windows": + } else { + // Most platforms don't seem to allow directly reading these registers. doinit() - default: - // Many platforms don't seem to allow reading these registers. - setMinimalFeatures() } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go new file mode 100644 index 000000000..0b470744a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2026 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && arm64 && gc + +package cpu + +func doinit() { + setMinimalFeatures() + + // The feature flags are explained in [Instruction Set Detection]. + // There are some differences between MacOS versions: + // + // MacOS 11 and 12 do not have "hw.optional" sysctl values for some of the features. + // + // MacOS 13 changed some of the naming conventions to align with ARM Architecture Reference Manual. + // For example "hw.optional.armv8_2_sha512" became "hw.optional.arm.FEAT_SHA512". + // It currently checks both to stay compatible with MacOS 11 and 12. + // The old names also work with MacOS 13, however it's not clear whether + // they will continue working with future OS releases. + // + // Once MacOS 12 is no longer supported the old names can be removed. 
+ // + // [Instruction Set Detection]: https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics + + // Encryption, hashing and checksum capabilities + + // For the following flags there are no MacOS 11 sysctl flags. + ARM64.HasAES = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_AES\x00")) + ARM64.HasPMULL = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_PMULL\x00")) + ARM64.HasSHA1 = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA1\x00")) + ARM64.HasSHA2 = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA256\x00")) + + ARM64.HasSHA3 = darwinSysctlEnabled([]byte("hw.optional.armv8_2_sha3\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA3\x00")) + ARM64.HasSHA512 = darwinSysctlEnabled([]byte("hw.optional.armv8_2_sha512\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA512\x00")) + + ARM64.HasCRC32 = darwinSysctlEnabled([]byte("hw.optional.armv8_crc32\x00")) + + // Atomic and memory ordering + ARM64.HasATOMICS = darwinSysctlEnabled([]byte("hw.optional.armv8_1_atomics\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_LSE\x00")) + ARM64.HasLRCPC = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_LRCPC\x00")) + + // SIMD and floating point capabilities + ARM64.HasFPHP = darwinSysctlEnabled([]byte("hw.optional.neon_fp16\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FP16\x00")) + ARM64.HasASIMDHP = darwinSysctlEnabled([]byte("hw.optional.neon_hpfp\x00")) || darwinSysctlEnabled([]byte("hw.optional.AdvSIMD_HPFPCvt\x00")) + ARM64.HasASIMDRDM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_RDM\x00")) + ARM64.HasASIMDDP = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DotProd\x00")) + ARM64.HasASIMDFHM = darwinSysctlEnabled([]byte("hw.optional.armv8_2_fhm\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FHM\x00")) + ARM64.HasI8MM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_I8MM\x00")) + + ARM64.HasJSCVT = 
darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_JSCVT\x00")) + ARM64.HasFCMA = darwinSysctlEnabled([]byte("hw.optional.armv8_3_compnum\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FCMA\x00")) + + // Miscellaneous + ARM64.HasDCPOP = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DPB\x00")) + ARM64.HasEVTSTRM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_ECV\x00")) + ARM64.HasDIT = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DIT\x00")) + + // Not supported, but added for completeness + ARM64.HasCPUID = false + + ARM64.HasSM3 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SM3\x00")) + ARM64.HasSM4 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SM4\x00")) + ARM64.HasSVE = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SVE\x00")) + ARM64.HasSVE2 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SVE2\x00")) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go new file mode 100644 index 000000000..37ecc6644 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go @@ -0,0 +1,31 @@ +// Copyright 2026 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && arm64 && !gc + +package cpu + +import "runtime" + +func doinit() { + setMinimalFeatures() + + ARM64.HasASIMD = true + ARM64.HasFP = true + + // Go already assumes these to be available because they were on the M1 + // and these are supported on all Apple arm64 chips. 
+ ARM64.HasAES = true + ARM64.HasPMULL = true + ARM64.HasSHA1 = true + ARM64.HasSHA2 = true + + if runtime.GOOS != "ios" { + // Apple A7 processors do not support these, however + // M-series SoCs are at least armv8.4-a + ARM64.HasCRC32 = true // armv8.1 + ARM64.HasATOMICS = true // armv8.2 + ARM64.HasJSCVT = true // armv8.3, if HasFP + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 7f1946780..05913081e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -9,3 +9,4 @@ package cpu func getisar0() uint64 { return 0 } func getisar1() uint64 { return 0 } func getpfr0() uint64 { return 0 } +func getzfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index ff74d7afa..53f814d7a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,8 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !linux && !netbsd && !openbsd && !windows && arm64 +//go:build !darwin && !linux && !netbsd && !openbsd && arm64 package cpu -func doinit() {} +func doinit() { + setMinimalFeatures() +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go deleted file mode 100644 index d09e85a36..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2026 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -import ( - "golang.org/x/sys/windows" -) - -func doinit() { - // set HasASIMD and HasFP to true as per - // https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#base-requirements - // - // The ARM64 version of Windows always presupposes that it's running on an ARMv8 or later architecture. - // Both floating-point and NEON support are presumed to be present in hardware. - // - ARM64.HasASIMD = true - ARM64.HasFP = true - - if windows.IsProcessorFeaturePresent(windows.PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) { - ARM64.HasAES = true - ARM64.HasPMULL = true - ARM64.HasSHA1 = true - ARM64.HasSHA2 = true - } - ARM64.HasSHA3 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SHA3_INSTRUCTIONS_AVAILABLE) - ARM64.HasCRC32 = windows.IsProcessorFeaturePresent(windows.PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) - ARM64.HasSHA512 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SHA512_INSTRUCTIONS_AVAILABLE) - ARM64.HasATOMICS = windows.IsProcessorFeaturePresent(windows.PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE) - if windows.IsProcessorFeaturePresent(windows.PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE) { - ARM64.HasASIMDDP = true - ARM64.HasASIMDRDM = true - } - if windows.IsProcessorFeaturePresent(windows.PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE) { - ARM64.HasLRCPC = true - ARM64.HasSM3 = true - } - ARM64.HasSVE = windows.IsProcessorFeaturePresent(windows.PF_ARM_SVE_INSTRUCTIONS_AVAILABLE) - ARM64.HasSVE2 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE) - ARM64.HasJSCVT = windows.IsProcessorFeaturePresent(windows.PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE) -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go new file mode 100644 index 000000000..7b4e67ff9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go @@ -0,0 +1,54 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy from internal/cpu and runtime to make sysctl calls. + +//go:build darwin && arm64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type Errno = syscall.Errno + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go index d079d8116..761912237 100644 --- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go +++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go @@ -19,13 +19,7 @@ import ( // A Note is a string describing a process note. // It implements the os.Signal interface. 
-type Note string - -func (n Note) Signal() {} - -func (n Note) String() string { - return string(n) -} +type Note = syscall.Note var ( Stdin = 0 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index c1a467017..45476a73c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -593,110 +593,115 @@ const ( ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFAL_LABEL = 0x2 - IFAL_ADDRESS = 0x1 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 
0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfAddrlblmsg = 0xc - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 
0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + PREFIX_UNSPEC = 0x0 + PREFIX_ADDRESS = 0x1 + PREFIX_CACHEINFO = 0x2 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofPrefixmsg = 0xc + SizeofPrefixCacheinfo = 0x8 + SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc + SizeofIfaCacheinfo = 0x10 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -735,6 +740,22 @@ type IfInfomsg struct { Change uint32 } +type Prefixmsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + Type uint8 + Len uint8 + Flags uint8 + Pad3 uint8 +} + +type PrefixCacheinfo struct { + Preferred_time uint32 + Valid_time uint32 +} + type IfAddrmsg struct { Family uint8 Prefixlen uint8 diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index 16f90560a..96317966e 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -8,5 +8,6 @@ package windows import "syscall" +type Signal = syscall.Signal type Errno = syscall.Errno type SysProcAttr = syscall.SysProcAttr diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 3ca814f54..1157b06d8 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -163,42 +163,7 @@ func (p *Proc) Addr() uintptr { // (according to the semantics of the specific function being called) before 
consulting // the error. The error will be guaranteed to contain windows.Errno. func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { - switch len(a) { - case 0: - return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) - case 1: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) - case 2: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) - case 3: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) - case 4: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) - case 5: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) - case 6: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) - case 7: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) - case 8: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) - case 9: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) - case 10: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) - case 11: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) - case 12: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) - case 13: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) - case 14: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) - case 15: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) - default: - panic("Call " + 
p.Name + " with too many arguments " + itoa(len(a)) + ".") - } + return syscall.SyscallN(p.Addr(), a...) } // A LazyDLL implements access to a single DLL. diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 39aeeb644..7cc6ff3af 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -198,7 +198,20 @@ type KeyInfo struct { // ModTime returns the key's last write time. func (ki *KeyInfo) ModTime() time.Time { - return time.Unix(0, ki.lastWriteTime.Nanoseconds()) + lastHigh, lastLow := ki.lastWriteTime.HighDateTime, ki.lastWriteTime.LowDateTime + // 100-nanosecond intervals since January 1, 1601 + hsec := uint64(lastHigh)<<32 + uint64(lastLow) + // Convert _before_ gauging; the nanosecond difference between Epoch (00:00:00 + // UTC, January 1, 1970) and Filetime's zero offset (January 1, 1601) is out + // of bounds for int64: -11644473600*1e7*1e2 < math.MinInt64 + sec := int64(hsec/1e7) - 11644473600 + nsec := int64(hsec%1e7) * 100 + return time.Unix(sec, nsec) +} + +// modTimeZero reports whether the key's last write time is zero. +func (ki *KeyInfo) modTimeZero() bool { + return ki.lastWriteTime.LowDateTime == 0 && ki.lastWriteTime.HighDateTime == 0 } // Stat retrieves information about the open key k. diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index a8b0364c7..6c955cea1 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1438,13 +1438,17 @@ func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } // GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security -// descriptor result on the Go heap. +// descriptor result on the Go heap. 
The security descriptor might be nil, even when err is nil, if the object exists +// but has no security descriptor. func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { var winHeapSD *SECURITY_DESCRIPTOR err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) if err != nil { return } + if winHeapSD == nil { + return nil, nil + } defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) return winHeapSD.copySelfRelativeSecurityDescriptor(), nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 738a9f212..d76643658 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -1490,20 +1490,6 @@ func Getgid() (gid int) { return -1 } func Getegid() (egid int) { return -1 } func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } -type Signal int - -func (s Signal) Signal() {} - -func (s Signal) String() string { - if 0 <= s && int(s) < len(signals) { - str := signals[s] - if str != "" { - return str - } - } - return "signal " + itoa(int(s)) -} - func LoadCreateSymbolicLink() error { return procCreateSymbolicLinkW.Find() } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 680a70ca8..a6c17cf63 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -61,13 +61,42 @@ func (r *responseDeduper) addAll(dr *DriverResponse) { } func (r *responseDeduper) addPackage(p *Package) { - if r.seenPackages[p.ID] != nil { + if prev := r.seenPackages[p.ID]; prev != nil { + // Package already seen in a previous response. Merge the file lists, + // removing duplicates. 
This can happen when the same package appears + // in multiple driver responses that are being merged together. + prev.GoFiles = appendUniqueStrings(prev.GoFiles, p.GoFiles) + prev.CompiledGoFiles = appendUniqueStrings(prev.CompiledGoFiles, p.CompiledGoFiles) + prev.OtherFiles = appendUniqueStrings(prev.OtherFiles, p.OtherFiles) + prev.IgnoredFiles = appendUniqueStrings(prev.IgnoredFiles, p.IgnoredFiles) + prev.EmbedFiles = appendUniqueStrings(prev.EmbedFiles, p.EmbedFiles) + prev.EmbedPatterns = appendUniqueStrings(prev.EmbedPatterns, p.EmbedPatterns) return } r.seenPackages[p.ID] = p r.dr.Packages = append(r.dr.Packages, p) } +// appendUniqueStrings appends elements from src to dst, skipping duplicates. +func appendUniqueStrings(dst, src []string) []string { + if len(src) == 0 { + return dst + } + + seen := make(map[string]bool, len(dst)) + for _, s := range dst { + seen[s] = true + } + + for _, s := range src { + if !seen[s] { + dst = append(dst, s) + } + } + + return dst +} + func (r *responseDeduper) addRoot(id string) { if r.seenRoots[id] { return @@ -832,6 +861,8 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. 
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + // VCS information is not needed when not printing Stale or StaleReason fields + "-buildvcs=false", } // golang/go#60456: with go1.21 and later, go list serves pgo variants, which diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index b249a5c7e..412ba06b5 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -403,6 +403,10 @@ func mergeResponses(responses ...*DriverResponse) *DriverResponse { if len(responses) == 0 { return nil } + // No dedup needed + if len(responses) == 1 { + return responses[0] + } response := newDeduper() response.dr.NotHandled = false response.dr.Compiler = responses[0].Compiler diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 56723d1f8..77aad553d 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -524,7 +524,7 @@ func (f *finder) find(T types.Type, path []byte) []byte { for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) if f.seenMethods[m] { - return nil + continue // break cycles (see TestIssue70418) } path2 := appendOpArg(path, opMethod, i) if m == f.obj { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader.go similarity index 88% rename from vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go rename to vendor/golang.org/x/tools/internal/gcimporter/ureader.go index 2e0d80585..3db62b890 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader.go @@ -35,6 +35,10 @@ type pkgReader struct { // laterFns holds functions that need to be invoked at the end of // import reading. 
+ // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. laterFns []func() // laterFors is used in case of 'type A B' to ensure that B is processed before A. laterFors map[types.Type]int @@ -158,12 +162,11 @@ type reader struct { // A readerDict holds the state for type parameters that parameterize // the current unified IR element. type readerDict struct { - // bounds is a slice of typeInfos corresponding to the underlying - // bounds of the element's type parameters. - bounds []typeInfo + rtbounds []typeInfo // contains constraint types for each parameter in rtparams + rtparams []*types.TypeParam // contains receiver type parameters for an element - // tparams is a slice of the constructed TypeParams for the element. - tparams []*types.TypeParam + tbounds []typeInfo // contains constraint types for each parameter in tparams + tparams []*types.TypeParam // contains type parameters for an element // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. 
@@ -353,7 +356,11 @@ func (r *reader) doTyp() (res types.Type) { return name.Type() case pkgbits.TypeTypeParam: - return r.dict.tparams[r.Len()] + n := r.Len() + if n < len(r.dict.rtbounds) { + return r.dict.rtparams[n] + } + return r.dict.tparams[n-len(r.dict.rtbounds)] case pkgbits.TypeArray: len := int64(r.Uint64()) @@ -534,7 +541,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { pos := r.pos() var tparams []*types.TypeParam if r.Version().Has(pkgbits.AliasTypeParamNames) { - tparams = r.typeParamNames() + tparams = r.typeParamNames(false) } typ := r.typ() declare(aliases.New(pos, objPkg, objName, typ, tparams)) @@ -547,8 +554,15 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjFunc: pos := r.pos() - tparams := r.typeParamNames() - sig := r.signature(nil, nil, tparams) + var rtparams []*types.TypeParam + var recv *types.Var + if r.Version().Has(pkgbits.GenericMethods) && r.Bool() { + r.selector() + rtparams = r.typeParamNames(true) + recv = r.param() + } + tparams := r.typeParamNames(false) + sig := r.signature(recv, rtparams, tparams) declare(types.NewFunc(pos, objPkg, objName, sig)) case pkgbits.ObjType: @@ -558,7 +572,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { named := types.NewNamed(obj, nil, nil) declare(obj) - named.SetTypeParams(r.typeParamNames()) + named.SetTypeParams(r.typeParamNames(false)) setUnderlying := func(underlying types.Type) { // If the underlying type is an interface, we need to @@ -638,9 +652,20 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { errorf("unexpected object with %v implicit type parameter(s)", implicits) } - dict.bounds = make([]typeInfo, r.Len()) - for i := range dict.bounds { - dict.bounds[i] = r.typInfo() + nreceivers := 0 + if r.Version().Has(pkgbits.GenericMethods) && r.Bool() { + nreceivers = r.Len() + } + nexplicits := r.Len() + + dict.rtbounds = make([]typeInfo, nreceivers) + for i := range 
dict.rtbounds { + dict.rtbounds[i] = r.typInfo() + } + + dict.tbounds = make([]typeInfo, nexplicits) + for i := range dict.tbounds { + dict.tbounds[i] = r.typInfo() } dict.derived = make([]derivedInfo, r.Len()) @@ -659,15 +684,24 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { return &dict } -func (r *reader) typeParamNames() []*types.TypeParam { +func (r *reader) typeParamNames(isGenMeth bool) []*types.TypeParam { r.Sync(pkgbits.SyncTypeParamNames) - // Note: This code assumes it only processes objects without - // implement type parameters. This is currently fine, because - // reader is only used to read in exported declarations, which are - // always package scoped. + // Note: This code assumes there are no implicit type parameters. + // This is fine since it only reads exported declarations, which + // never have implicits. - if len(r.dict.bounds) == 0 { + var in []typeInfo + var out *[]*types.TypeParam + if isGenMeth { + in = r.dict.rtbounds + out = &r.dict.rtparams + } else { + in = r.dict.tbounds + out = &r.dict.tparams + } + + if len(in) == 0 { return nil } @@ -676,40 +710,34 @@ func (r *reader) typeParamNames() []*types.TypeParam { // create all the TypeNames and TypeParams, then we construct and // set the bound type. - r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) - for i := range r.dict.bounds { + // We have to save tparams outside of the closure, because typeParamNames + // can be called multiple times with the same dictionary instance. 
+ tparams := make([]*types.TypeParam, len(in)) + *out = tparams + + for i := range in { pos := r.pos() pkg, name := r.localIdent() tname := types.NewTypeName(pos, pkg, name, nil) - r.dict.tparams[i] = types.NewTypeParam(tname, nil) + tparams[i] = types.NewTypeParam(tname, nil) } - typs := make([]types.Type, len(r.dict.bounds)) - for i, bound := range r.dict.bounds { - typs[i] = r.p.typIdx(bound, r.dict) + // The reader dictionary will continue mutating before we have time + // to call delayed functions; make a local copy of the constraints. + types := make([]types.Type, len(in)) + for i, info := range in { + types[i] = r.p.typIdx(info, r.dict) } - // TODO(mdempsky): This is subtle, elaborate further. - // - // We have to save tparams outside of the closure, because - // typeParamNames() can be called multiple times with the same - // dictionary instance. - // - // Also, this needs to happen later to make sure SetUnderlying has - // been called. - // - // TODO(mdempsky): Is it safe to have a single "later" slice or do - // we need to have multiple passes? See comments on CL 386002 and - // go.dev/issue/52104. - tparams := r.dict.tparams + // This needs to happen later to make sure SetUnderlying has been called. r.p.later(func() { - for i, typ := range typs { + for i, typ := range types { tparams[i].SetConstraint(typ) } }) - return r.dict.tparams + return tparams } func (r *reader) method() *types.Func { @@ -717,7 +745,7 @@ func (r *reader) method() *types.Func { pos := r.pos() pkg, name := r.selector() - rparams := r.typeParamNames() + rparams := r.typeParamNames(false) sig := r.signature(r.param(), rparams, nil) _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. 
diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 446c5846a..cce290c41 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -26,6 +26,9 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" inv.ModFile = "" + // Set GO111MODULE=off so that we are immune to errors in go.{work,mod}. + // Unfortunately, this breaks the Go 1.21+ toolchain directive and + // may affect the set of ReleaseTags; see #68495. inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") stdoutBytes, err := r.Run(ctx, inv) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go index 53af9df22..0db965274 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/version.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -28,6 +28,15 @@ const ( // - remove derived info "needed" bool V2 + // V3: introduces a more compact format for composite literal element lists + // - negative lengths indicate that (some) elements may have keys + // - positive lengths indicate that no element has a key + // - a negative struct field index indicates an embedded field + V3 + + // V4: encodes generic methods as standalone function objects + V4 + numVersions = iota ) @@ -61,6 +70,12 @@ const ( // whether a type was a derived type. DerivedInfoNeeded + // Composite literals use a more compact format for element lists. + CompactCompLiterals + + // Generic methods may appear as standalone function objects. 
+ GenericMethods + numFields = iota ) @@ -68,6 +83,8 @@ const ( var introduced = [numFields]Version{ Flags: V1, AliasTypeParamNames: V2, + CompactCompLiterals: V3, + GenericMethods: V4, } // removed is the version a field was removed in or 0 for fields diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 27a2b1792..2e05de464 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -11,7 +11,9 @@ import ( // CoreType returns the core type of T or nil if T does not have a core type. // -// See https://go.dev/ref/spec#Core_types for the definition of a core type. +// As of Go1.25, the notion of a core type has been removed from the language spec. +// See https://go.dev/blog/coretypes for more details. +// TODO(mkalil): We should eventually consider removing all uses of CoreType. func CoreType(T types.Type) types.Type { U := T.Underlying() if _, ok := U.(*types.Interface); !ok { @@ -34,7 +36,7 @@ func CoreType(T types.Type) types.Type { } if identical == len(terms) { - // https://go.dev/ref/spec#Core_types + // From the deprecated core types spec: // "There is a single type U which is the underlying type of all types in the type set of T" return U } @@ -42,7 +44,7 @@ func CoreType(T types.Type) types.Type { if !ok { return nil // no core type as identical < len(terms) and U is not a channel. } - // https://go.dev/ref/spec#Core_types + // From the deprecated core types spec: // "the type chan E if T contains only bidirectional channels, or the type chan<- E or // <-chan E depending on the direction of the directional channels present." 
for chans := identical; chans < len(terms); chans++ { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 7112318fc..6582cc81f 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -194,3 +194,51 @@ func Imports(pkg *types.Package, path string) bool { } return false } + +// ObjectKind returns a description of the object's kind. +// +// from objectKind in go/types +func ObjectKind(obj types.Object) string { + switch obj := obj.(type) { + case *types.PkgName: + return "package name" + case *types.Const: + return "constant" + case *types.TypeName: + if obj.IsAlias() { + return "type alias" + } else if _, ok := obj.Type().(*types.TypeParam); ok { + return "type parameter" + } else { + return "defined type" + } + case *types.Var: + switch obj.Kind() { + case PackageVar: + return "package-level variable" + case LocalVar: + return "local variable" + case RecvVar: + return "receiver" + case ParamVar: + return "parameter" + case ResultVar: + return "result variable" + case FieldVar: + return "struct field" + } + case *types.Func: + if obj.Signature().Recv() != nil { + return "method" + } else { + return "function" + } + case *types.Label: + return "label" + case *types.Builtin: + return "built-in function" + case *types.Nil: + return "untyped nil" + } + return "unknown symbol" +} diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index cdd36c388..360a5b552 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -19,6 +19,7 @@ const ( Go1_24 = "go1.24" Go1_25 = "go1.25" Go1_26 = "go1.26" + Go1_27 = "go1.27" ) // Future is an invalid unknown Go version sometime in the future. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index f84048172..382a9f007 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -167,6 +167,63 @@ func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { return file_google_api_client_proto_rawDescGZIP(), []int{1} } +// The behavior to take when the flow control limit is exceeded. +type FlowControlLimitExceededBehaviorProto int32 + +const ( + // Default behavior, system-defined. + FlowControlLimitExceededBehaviorProto_UNSET_BEHAVIOR FlowControlLimitExceededBehaviorProto = 0 + // Stop operation, raise error. + FlowControlLimitExceededBehaviorProto_THROW_EXCEPTION FlowControlLimitExceededBehaviorProto = 1 + // Pause operation until limit clears. + FlowControlLimitExceededBehaviorProto_BLOCK FlowControlLimitExceededBehaviorProto = 2 + // Continue operation, disregard limit. + FlowControlLimitExceededBehaviorProto_IGNORE FlowControlLimitExceededBehaviorProto = 3 +) + +// Enum value maps for FlowControlLimitExceededBehaviorProto. 
+var ( + FlowControlLimitExceededBehaviorProto_name = map[int32]string{ + 0: "UNSET_BEHAVIOR", + 1: "THROW_EXCEPTION", + 2: "BLOCK", + 3: "IGNORE", + } + FlowControlLimitExceededBehaviorProto_value = map[string]int32{ + "UNSET_BEHAVIOR": 0, + "THROW_EXCEPTION": 1, + "BLOCK": 2, + "IGNORE": 3, + } +) + +func (x FlowControlLimitExceededBehaviorProto) Enum() *FlowControlLimitExceededBehaviorProto { + p := new(FlowControlLimitExceededBehaviorProto) + *p = x + return p +} + +func (x FlowControlLimitExceededBehaviorProto) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FlowControlLimitExceededBehaviorProto) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[2].Descriptor() +} + +func (FlowControlLimitExceededBehaviorProto) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[2] +} + +func (x FlowControlLimitExceededBehaviorProto) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FlowControlLimitExceededBehaviorProto.Descriptor instead. +func (FlowControlLimitExceededBehaviorProto) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + // Required information for every language. type CommonLanguageSettings struct { state protoimpl.MessageState @@ -181,6 +238,8 @@ type CommonLanguageSettings struct { // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` // Configuration for which RPCs should be generated in the GAPIC client. + // + // Note: This field should not be used in most cases. 
SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` } @@ -547,8 +606,9 @@ type JavaSettings struct { // Example of a YAML configuration:: // // publishing: - // java_settings: - // library_package: com.google.cloud.pubsub.v1 + // library_settings: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` // Configure the Java class name to use instead of the service's for its // corresponding generated GAPIC client. Keys are fully-qualified @@ -679,6 +739,19 @@ type PhpSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // The package name to use in Php. Clobbers the php_namespace option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.php.package_name" field + // in gapic.yaml. API teams should use the protobuf php_namespace option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // library_settings: + // php_settings: + // library_package: Google\Cloud\PubSub\V1 + LibraryPackage string `protobuf:"bytes,2,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` } func (x *PhpSettings) Reset() { @@ -720,6 +793,13 @@ func (x *PhpSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PhpSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + // Settings for Python client libraries. type PythonSettings struct { state protoimpl.MessageState @@ -997,11 +1077,12 @@ type GoSettings struct { // service names and values are the name to be used for the service client // and call options. 
// - // publishing: + // Example: // - // go_settings: - // renamed_services: - // Publisher: TopicAdmin + // publishing: + // go_settings: + // renamed_services: + // Publisher: TopicAdmin RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -1094,6 +1175,18 @@ type MethodSettings struct { // auto_populated_fields: // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` + // Batching configuration for an API method in client libraries. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.BatchCreateExample + // batching: + // element_count_threshold: 1000 + // request_byte_threshold: 100000000 + // delay_threshold_millis: 10 + Batching *BatchingConfigProto `protobuf:"bytes,4,opt,name=batching,proto3" json:"batching,omitempty"` } func (x *MethodSettings) Reset() { @@ -1149,8 +1242,17 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +func (x *MethodSettings) GetBatching() *BatchingConfigProto { + if x != nil { + return x.Batching + } + return nil +} + // This message is used to configure the generation of a subset of the RPCs in // a service for client libraries. +// +// Note: This feature should not be used in most cases. type SelectiveGapicGeneration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1214,6 +1316,257 @@ func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool { return false } +// `BatchingConfigProto` defines the batching configuration for an API method. 
+type BatchingConfigProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The thresholds which trigger a batched request to be sent. + Thresholds *BatchingSettingsProto `protobuf:"bytes,1,opt,name=thresholds,proto3" json:"thresholds,omitempty"` + // The request and response fields used in batching. + BatchDescriptor *BatchingDescriptorProto `protobuf:"bytes,2,opt,name=batch_descriptor,json=batchDescriptor,proto3" json:"batch_descriptor,omitempty"` +} + +func (x *BatchingConfigProto) Reset() { + *x = BatchingConfigProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchingConfigProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchingConfigProto) ProtoMessage() {} + +func (x *BatchingConfigProto) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchingConfigProto.ProtoReflect.Descriptor instead. +func (*BatchingConfigProto) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{13} +} + +func (x *BatchingConfigProto) GetThresholds() *BatchingSettingsProto { + if x != nil { + return x.Thresholds + } + return nil +} + +func (x *BatchingConfigProto) GetBatchDescriptor() *BatchingDescriptorProto { + if x != nil { + return x.BatchDescriptor + } + return nil +} + +// `BatchingSettingsProto` specifies a set of batching thresholds, each of +// which acts as a trigger to send a batch of messages as a request. At least +// one threshold must be positive nonzero. 
+type BatchingSettingsProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of elements of a field collected into a batch which, if + // exceeded, causes the batch to be sent. + ElementCountThreshold int32 `protobuf:"varint,1,opt,name=element_count_threshold,json=elementCountThreshold,proto3" json:"element_count_threshold,omitempty"` + // The aggregated size of the batched field which, if exceeded, causes the + // batch to be sent. This size is computed by aggregating the sizes of the + // request field to be batched, not of the entire request message. + RequestByteThreshold int64 `protobuf:"varint,2,opt,name=request_byte_threshold,json=requestByteThreshold,proto3" json:"request_byte_threshold,omitempty"` + // The duration after which a batch should be sent, starting from the addition + // of the first message to that batch. + DelayThreshold *durationpb.Duration `protobuf:"bytes,3,opt,name=delay_threshold,json=delayThreshold,proto3" json:"delay_threshold,omitempty"` + // The maximum number of elements collected in a batch that could be accepted + // by server. + ElementCountLimit int32 `protobuf:"varint,4,opt,name=element_count_limit,json=elementCountLimit,proto3" json:"element_count_limit,omitempty"` + // The maximum size of the request that could be accepted by server. + RequestByteLimit int32 `protobuf:"varint,5,opt,name=request_byte_limit,json=requestByteLimit,proto3" json:"request_byte_limit,omitempty"` + // The maximum number of elements allowed by flow control. + FlowControlElementLimit int32 `protobuf:"varint,6,opt,name=flow_control_element_limit,json=flowControlElementLimit,proto3" json:"flow_control_element_limit,omitempty"` + // The maximum size of data allowed by flow control. 
+ FlowControlByteLimit int32 `protobuf:"varint,7,opt,name=flow_control_byte_limit,json=flowControlByteLimit,proto3" json:"flow_control_byte_limit,omitempty"` + // The behavior to take when the flow control limit is exceeded. + FlowControlLimitExceededBehavior FlowControlLimitExceededBehaviorProto `protobuf:"varint,8,opt,name=flow_control_limit_exceeded_behavior,json=flowControlLimitExceededBehavior,proto3,enum=google.api.FlowControlLimitExceededBehaviorProto" json:"flow_control_limit_exceeded_behavior,omitempty"` +} + +func (x *BatchingSettingsProto) Reset() { + *x = BatchingSettingsProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchingSettingsProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchingSettingsProto) ProtoMessage() {} + +func (x *BatchingSettingsProto) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchingSettingsProto.ProtoReflect.Descriptor instead. 
+func (*BatchingSettingsProto) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{14} +} + +func (x *BatchingSettingsProto) GetElementCountThreshold() int32 { + if x != nil { + return x.ElementCountThreshold + } + return 0 +} + +func (x *BatchingSettingsProto) GetRequestByteThreshold() int64 { + if x != nil { + return x.RequestByteThreshold + } + return 0 +} + +func (x *BatchingSettingsProto) GetDelayThreshold() *durationpb.Duration { + if x != nil { + return x.DelayThreshold + } + return nil +} + +func (x *BatchingSettingsProto) GetElementCountLimit() int32 { + if x != nil { + return x.ElementCountLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetRequestByteLimit() int32 { + if x != nil { + return x.RequestByteLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetFlowControlElementLimit() int32 { + if x != nil { + return x.FlowControlElementLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetFlowControlByteLimit() int32 { + if x != nil { + return x.FlowControlByteLimit + } + return 0 +} + +func (x *BatchingSettingsProto) GetFlowControlLimitExceededBehavior() FlowControlLimitExceededBehaviorProto { + if x != nil { + return x.FlowControlLimitExceededBehavior + } + return FlowControlLimitExceededBehaviorProto_UNSET_BEHAVIOR +} + +// `BatchingDescriptorProto` specifies the fields of the request message to be +// used for batching, and, optionally, the fields of the response message to be +// used for demultiplexing. +type BatchingDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The repeated field in the request message to be aggregated by batching. + BatchedField string `protobuf:"bytes,1,opt,name=batched_field,json=batchedField,proto3" json:"batched_field,omitempty"` + // A list of the fields in the request message. 
Two requests will be batched + // together only if the values of every field specified in + // `request_discriminator_fields` is equal between the two requests. + DiscriminatorFields []string `protobuf:"bytes,2,rep,name=discriminator_fields,json=discriminatorFields,proto3" json:"discriminator_fields,omitempty"` + // Optional. When present, indicates the field in the response message to be + // used to demultiplex the response into multiple response messages, in + // correspondence with the multiple request messages originally batched + // together. + SubresponseField string `protobuf:"bytes,3,opt,name=subresponse_field,json=subresponseField,proto3" json:"subresponse_field,omitempty"` +} + +func (x *BatchingDescriptorProto) Reset() { + *x = BatchingDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchingDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchingDescriptorProto) ProtoMessage() {} + +func (x *BatchingDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchingDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*BatchingDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{15} +} + +func (x *BatchingDescriptorProto) GetBatchedField() string { + if x != nil { + return x.BatchedField + } + return "" +} + +func (x *BatchingDescriptorProto) GetDiscriminatorFields() []string { + if x != nil { + return x.DiscriminatorFields + } + return nil +} + +func (x *BatchingDescriptorProto) GetSubresponseField() string { + if x != nil { + return x.SubresponseField + } + return "" +} + // Experimental features to be included during client library generation. // These fields will be deprecated once the feature graduates and is enabled // by default. @@ -1242,7 +1595,7 @@ type PythonSettings_ExperimentalFeatures struct { func (x *PythonSettings_ExperimentalFeatures) Reset() { *x = PythonSettings_ExperimentalFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[14] + mi := &file_google_api_client_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1255,7 +1608,7 @@ func (x *PythonSettings_ExperimentalFeatures) String() string { func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[14] + mi := &file_google_api_client_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1320,7 +1673,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[18] + mi := &file_google_api_client_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1333,7 +1686,7 @@ func (x *MethodSettings_LongRunning) String() string { func 
(*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[18] + mi := &file_google_api_client_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1640,173 +1993,241 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, + 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, + 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, + 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, + 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 
0x6b, 0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, - 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, - 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, - 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, - 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, 
- 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 
0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 
0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, - 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, - 0x65, 0x64, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, - 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, - 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, - 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, - 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 
0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, - 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, - 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, - 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 
0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, - 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, - 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, - 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, - 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, - 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, - 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, - 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, - 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, - 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x3a, 0x43, 0x0a, 
0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, - 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, - 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, - 0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 
0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, + 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 
0x65, 0x6e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, + 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 
0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, + 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, + 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x08, 0x62, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x62, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x1a, + 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, + 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, + 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, + 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 
0x63, 0x74, + 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, + 0x74, 0x65, 0x64, 0x41, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x22, 0xa8, 0x01, + 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x41, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x74, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x10, 0x62, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x9f, 0x04, 0x0a, 0x15, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 
0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x15, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x42, 0x79, 0x74, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x12, 0x42, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x11, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x79, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x5f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x35, 0x0a, 0x17, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x14, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x42, 0x79, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x81, 0x01, 0x0a, 0x24, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x78, + 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x20, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, + 0x65, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x17, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x31, 0x0a, 0x14, 0x64, + 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x72, + 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x73, 0x75, 0x62, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x75, 0x62, 0x72, 0x65, + 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2a, 0xa3, 0x01, 0x0a, 0x19, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, + 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, + 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, + 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, + 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, + 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, + 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, + 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, + 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, + 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, + 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, + 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x2a, 0x67, 0x0a, 0x25, 0x46, 0x6c, + 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, + 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 
0x0e, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x5f, 0x42, 0x45, 0x48, + 0x41, 0x56, 0x49, 0x4f, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x48, 0x52, 0x4f, 0x57, + 0x5f, 0x45, 0x58, 0x43, 0x45, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, + 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, + 0x45, 0x10, 0x03, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 
0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1821,76 +2242,85 @@ func file_google_api_client_proto_rawDescGZIP() []byte { return file_google_api_client_proto_rawDescData } -var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 22) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: 
google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration - nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry - (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures - nil, // 17: google.api.DotnetSettings.RenamedServicesEntry - nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry - nil, // 19: google.api.GoSettings.RenamedServicesEntry - (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 21: google.api.LaunchStage - (*durationpb.Duration)(nil), // 22: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions + (FlowControlLimitExceededBehaviorProto)(0), // 2: google.api.FlowControlLimitExceededBehaviorProto + (*CommonLanguageSettings)(nil), // 3: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 4: google.api.ClientLibrarySettings + (*Publishing)(nil), // 5: google.api.Publishing + (*JavaSettings)(nil), // 6: google.api.JavaSettings + (*CppSettings)(nil), // 7: google.api.CppSettings + (*PhpSettings)(nil), // 8: google.api.PhpSettings + (*PythonSettings)(nil), // 9: google.api.PythonSettings + (*NodeSettings)(nil), // 10: google.api.NodeSettings + (*DotnetSettings)(nil), // 11: google.api.DotnetSettings + (*RubySettings)(nil), // 12: google.api.RubySettings + (*GoSettings)(nil), // 13: google.api.GoSettings + (*MethodSettings)(nil), // 14: google.api.MethodSettings + (*SelectiveGapicGeneration)(nil), // 15: google.api.SelectiveGapicGeneration + (*BatchingConfigProto)(nil), // 16: google.api.BatchingConfigProto + (*BatchingSettingsProto)(nil), // 17: google.api.BatchingSettingsProto + 
(*BatchingDescriptorProto)(nil), // 18: google.api.BatchingDescriptorProto + nil, // 19: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 20: google.api.PythonSettings.ExperimentalFeatures + nil, // 21: google.api.DotnetSettings.RenamedServicesEntry + nil, // 22: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 23: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 24: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 25: google.api.LaunchStage + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 28: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration - 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 
15, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 25, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 6, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 7, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 8, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 9, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 10, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 11, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 12, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 13, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 14, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures - 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 21: 
google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry - 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 35, // [35:35] is the sub-list for method output_type - 35, // [35:35] is the sub-list for method input_type - 35, // [35:35] is the sub-list for extension type_name - 31, // [31:35] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name + 4, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 19, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 3, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 16: google.api.CppSettings.common:type_name -> 
google.api.CommonLanguageSettings + 3, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 20, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 3, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 21, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 22, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 3, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 3, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 23, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 24, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 16, // 28: google.api.MethodSettings.batching:type_name -> google.api.BatchingConfigProto + 17, // 29: google.api.BatchingConfigProto.thresholds:type_name -> google.api.BatchingSettingsProto + 18, // 30: google.api.BatchingConfigProto.batch_descriptor:type_name -> google.api.BatchingDescriptorProto + 26, // 31: google.api.BatchingSettingsProto.delay_threshold:type_name -> google.protobuf.Duration + 2, // 32: google.api.BatchingSettingsProto.flow_control_limit_exceeded_behavior:type_name -> google.api.FlowControlLimitExceededBehaviorProto + 26, // 33: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 26, // 34: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 26, // 35: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> 
google.protobuf.Duration + 27, // 36: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 28, // 37: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 28, // 38: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 28, // 39: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 40, // [40:40] is the sub-list for method output_type + 40, // [40:40] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 36, // [36:40] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -2055,7 +2485,43 @@ func file_google_api_client_proto_init() { return nil } } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchingConfigProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchingSettingsProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchingDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PythonSettings_ExperimentalFeatures); i { case 0: return &v.state @@ -2067,7 +2533,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[21].Exporter = func(v interface{}, i 
int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -2085,8 +2551,8 @@ func file_google_api_client_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 2, - NumMessages: 19, + NumEnums: 3, + NumMessages: 22, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 5d583b866..fc6d27b4a 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index 53e9dd1e9..b660d02c1 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index d30fcee4c..998205e18 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 175974a86..ad2a3fbf8 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index b8c4aa71f..9a83b9636 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -66,9 +66,13 @@ const ( // app_profile_id: profiles/prof_qux // } // -// The routing header consists of one or multiple key-value pairs. Every key -// and value must be percent-encoded, and joined together in the format of -// `key1=value1&key2=value2`. 
+// The routing header consists of one or multiple key-value pairs. The order of +// the key-value pairs is undefined, the order of the `routing_parameters` in +// the `RoutingRule` only matters for the evaluation order of the path +// templates when `field` is the same. See the examples below for more details. +// +// Every key and value in the routing header must be percent-encoded, +// and joined together in the following format: `key1=value1&key2=value2`. // The examples below skip the percent-encoding for readability. // // # Example 1 diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index d083dde3e..902ae4498 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index a69c1d473..2cbb7b43b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index e017ef071..842a5d9b5 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -957,17 +957,17 @@ type BadRequest_FieldViolation struct { // In this example, in proto `field` could take one of the following values: // // - `full_name` for a violation in the `full_name` value - // - `email_addresses[1].email` for a violation in the `email` field of the + // - `email_addresses[0].email` for a violation in the `email` field of the // first `email_addresses` message - // - `email_addresses[3].type[2]` for a violation in the second `type` + // - `email_addresses[2].type[1]` for a violation in the second `type` // value in the third `email_addresses` message. // // In JSON, the same values are represented as: // // - `fullName` for a violation in the `fullName` value - // - `emailAddresses[1].email` for a violation in the `email` field of the + // - `emailAddresses[0].email` for a violation in the `email` field of the // first `emailAddresses` message - // - `emailAddresses[3].type[2]` for a violation in the second `type` + // - `emailAddresses[2].type[1]` for a violation in the second `type` // value in the third `emailAddresses` message. Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. 
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 06a3f7106..f25a7bcc7 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -127,14 +127,13 @@ var file_google_rpc_status_proto_rawDesc = []byte{ 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x5e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x75, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 52d530d7a..4c60518c7 100644 --- 
a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -27,6 +27,8 @@ package attributes import ( "fmt" + "iter" + "maps" "strings" ) @@ -37,37 +39,46 @@ import ( // any) bool', it will be called by (*Attributes).Equal to determine whether // two values with the same key should be considered equal. type Attributes struct { - m map[any]any + parent *Attributes + key, value any } // New returns a new Attributes containing the key/value pair. func New(key, value any) *Attributes { - return &Attributes{m: map[any]any{key: value}} + return &Attributes{ + key: key, + value: value, + } } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the -// last value overwrites all previous values for that key. To remove an -// existing key, use a nil value. value should not be modified later. +// last value overwrites all previous values for that key. value should not be +// modified later. +// +// Note that Attributes do not support deletion. Avoid using untyped nil values. +// Since the Value method returns an untyped nil when a key is absent, it is +// impossible to distinguish between a missing key and a key explicitly set to +// an untyped nil. If you need to represent a value being unset, consider +// storing a specific sentinel type or a wrapper struct with a boolean field +// indicating presence. func (a *Attributes) WithValue(key, value any) *Attributes { - if a == nil { - return New(key, value) + return &Attributes{ + parent: a, + key: key, + value: value, } - n := &Attributes{m: make(map[any]any, len(a.m)+1)} - for k, v := range a.m { - n.m[k] = v - } - n.m[key] = value - return n } // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. 
func (a *Attributes) Value(key any) any { - if a == nil { - return nil + for cur := a; cur != nil; cur = cur.parent { + if cur.key == key { + return cur.value + } } - return a.m[key] + return nil } // Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is @@ -83,11 +94,15 @@ func (a *Attributes) Equal(o *Attributes) bool { if a == nil || o == nil { return false } - if len(a.m) != len(o.m) { - return false + if a == o { + return true } - for k, v := range a.m { - ov, ok := o.m[k] + m := maps.Collect(o.all()) + lenA := 0 + + for k, v := range a.all() { + lenA++ + ov, ok := m[k] if !ok { // o missing element of a return false @@ -101,7 +116,7 @@ func (a *Attributes) Equal(o *Attributes) bool { return false } } - return true + return lenA == len(m) } // String prints the attribute map. If any key or values throughout the map @@ -110,11 +125,11 @@ func (a *Attributes) String() string { var sb strings.Builder sb.WriteString("{") first := true - for k, v := range a.m { + for k, v := range a.all() { if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + fmt.Fprintf(&sb, "%q: %q ", str(k), str(v)) first = false } sb.WriteString("}") @@ -139,3 +154,21 @@ func str(x any) (s string) { func (a *Attributes) MarshalJSON() ([]byte, error) { return []byte(a.String()), nil } + +// all returns an iterator that yields all key-value pairs in the Attributes +// chain. If a key appears multiple times, only the most recently added value +// is yielded. 
+func (a *Attributes) all() iter.Seq2[any, any] { + return func(yield func(any, any) bool) { + seen := map[any]bool{} + for cur := a; cur != nil; cur = cur.parent { + if seen[cur.key] { + continue + } + if !yield(cur.key, cur.value) { + return + } + seen[cur.key] = true + } + } +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index d08b7ad63..326888ae3 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -33,6 +33,7 @@ import ( estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -46,8 +47,8 @@ var ( ) // Register registers the balancer builder to the balancer map. b.Name -// (lowercased) will be used as the name registered with this builder. If the -// Builder implements ConfigParser, ParseConfig will be called when new service +// will be used as the name registered with this builder. If the Builder +// implements ConfigParser, ParseConfig will be called when new service // configs are received by the resolver, and the result will be provided to the // Balancer in UpdateClientConnState. // @@ -55,12 +56,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - name := strings.ToLower(b.Name()) - if name != b.Name() { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer registered with name %q. 
grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + name := b.Name() + if !envconfig.CaseSensitiveBalancerRegistries { + name = strings.ToLower(name) + if name != b.Name() { + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon. After 2 releases, we will enable the env var by default.", b.Name()) + } } m[name] = b } @@ -78,16 +79,17 @@ func init() { } // Get returns the resolver builder registered with the given name. -// Note that the compare is done in a case-insensitive fashion. +// Note that the compare is done in a case-sensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { - if strings.ToLower(name) != name { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + if !envconfig.CaseSensitiveBalancerRegistries { + lowerName := strings.ToLower(name) + if lowerName != name { + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon. 
After 2 releases, we will enable the env var by default.", name) + } + name = lowerName } - if b, ok := m[strings.ToLower(name)]; ok { + if b, ok := m[name]; ok { return b } return nil diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 4d576876d..4399ba014 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -121,8 +121,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc.Connect() } } - for _, a := range b.subConns.Keys() { - sc, _ := b.subConns.Get(a) + for a, sc := range b.subConns.All() { // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -171,8 +170,7 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for _, addr := range b.subConns.Keys() { - sc, _ := b.subConns.Get(addr) + for addr, sc := range b.subConns.All() { if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 360db08eb..12479f698 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -187,8 +187,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState } } // Delete old children that are no longer present. 
- for _, e := range children.Keys() { - child, _ := children.Get(e) + for e, child := range children.All() { if _, ok := newChildren.Get(e); !ok { child.closeLocked() } @@ -212,7 +211,7 @@ func (es *endpointSharding) ResolverError(err error) { es.updateState() }() children := es.children.Load() - for _, child := range children.Values() { + for _, child := range children.All() { child.resolverErrorLocked(err) } } @@ -225,7 +224,7 @@ func (es *endpointSharding) Close() { es.childMu.Lock() defer es.childMu.Unlock() children := es.children.Load() - for _, child := range children.Values() { + for _, child := range children.All() { child.closeLocked() } } @@ -233,7 +232,7 @@ func (es *endpointSharding) Close() { func (es *endpointSharding) ExitIdle() { es.childMu.Lock() defer es.childMu.Unlock() - for _, bw := range es.children.Load().Values() { + for _, bw := range es.children.Load().All() { if !bw.isClosed { bw.child.ExitIdle() } @@ -255,7 +254,7 @@ func (es *endpointSharding) updateState() { children := es.children.Load() childStates := make([]ChildState, 0, children.Len()) - for _, child := range children.Values() { + for _, child := range children.All() { childState := child.childState childStates = append(childStates, childState) childPicker := childState.State.Picker diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index dccd9f0bf..518a69d57 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -399,14 +399,14 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.firstPass = true b.numTF = 0 // Reset the connection attempt record for existing SubConns. 
- for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { sd.connectionFailedInFirstPass = false } b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { sd.subConn.Shutdown() } b.subConns = resolver.NewAddressMapV2[*scData]() @@ -506,7 +506,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) newAddrsMap.Set(addr, true) } - for _, oldAddr := range b.subConns.Keys() { + for oldAddr := range b.subConns.All() { if _, ok := newAddrsMap.Get(oldAddr); ok { continue } @@ -520,7 +520,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { b.cancelConnectionTimer() - for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { if sd.subConn != selected.subConn { sd.subConn.Shutdown() } @@ -771,7 +771,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } // Connect() has been called on all the SubConns. The first pass can be // ended if all the SubConns have reported a failure. - for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { if !sd.connectionFailedInFirstPass { return } @@ -782,7 +782,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. 
- for _, sd := range b.subConns.Values() { + for _, sd := range b.subConns.All() { if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 42c61cf9f..296123e20 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 0bcd16dbb..a6083c3b0 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -22,7 +22,6 @@ import ( "context" "crypto/tls" "crypto/x509" - "errors" "fmt" "net" "net/url" @@ -52,22 +51,21 @@ func (t TLSInfo) AuthType() string { } // ValidateAuthority validates the provided authority being used to override the -// :authority header by verifying it against the peer certificates. It returns a +// :authority header by verifying it against the peer certificate. It returns a // non-nil error if the validation fails. func (t TLSInfo) ValidateAuthority(authority string) error { - var errs []error host, _, err := net.SplitHostPort(authority) if err != nil { host = authority } - for _, cert := range t.State.PeerCertificates { - var err error - if err = cert.VerifyHostname(host); err == nil { - return nil - } - errs = append(errs, err) + + // Verify authority against the leaf certificate. + if len(t.State.PeerCertificates) == 0 { + // This is not expected to happen as the TLS handshake has already + // completed and should have populated PeerCertificates. 
+ return fmt.Errorf("credentials: no peer certificates found to verify authority %q", host) } - return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...)) + return t.State.PeerCertificates[0].VerifyHostname(host) } // cipherSuiteLookup returns the string version of a TLS cipher suite ID. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7a5ac2e7c..4ec5f9cd0 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -705,10 +705,11 @@ func WithDisableHealthCheck() DialOption { func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ - ReadBufferSize: defaultReadBufSize, - WriteBufferSize: defaultWriteBufSize, - UserAgent: grpcUA, - BufferPool: mem.DefaultBufferPool(), + ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, + SharedWriteBuffer: true, + UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, idleTimeout: 30 * time.Minute, diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 8f7d9f6bb..dcb98cdbc 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index e99cd5c83..9e10fdd2e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.6.0 +// - protoc-gen-go-grpc v1.6.1 // - protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index e8dc79129..3ae45faa4 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -54,17 +54,16 @@ var ( // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by - // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the - // implementation of A76 is stable, we will flip the default value to false - // in a subsequent release. A final release will remove this environment - // variable, enabling the new behavior unconditionally. - XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". A future + // release will remove this environment variable, enabling the new behavior + // unconditionally. + XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", false) // RingHashSetRequestHashKey is set if the ring hash balancer can get the // request hash header by setting the "requestHashHeader" field, according - // to gRFC A76. 
It can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". - RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) + // to gRFC A76. It can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "false". + RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", true) // ALTSHandshakerKeepaliveParams is set if we should add the // KeepaliveParams when dial the ALTS handshaker service. @@ -78,6 +77,14 @@ var ( // - The DNS resolver is being used. EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) + // CaseSensitiveBalancerRegistries is set if the balancer registry should be + // case-sensitive. This is disabled by default, but can be enabled by setting + // the env variable "GRPC_GO_EXPERIMENTAL_CASE_SENSITIVE_BALANCER_REGISTRIES" + // to "true". + // + // TODO: After 2 releases, we will enable the env var by default. + CaseSensitiveBalancerRegistries = boolFromEnv("GRPC_GO_EXPERIMENTAL_CASE_SENSITIVE_BALANCER_REGISTRIES", false) + // XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled. // This feature is defined in gRFC A81 and is enabled by setting the // environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true". @@ -88,6 +95,37 @@ var ( // feature can be disabled by setting the environment variable // GRPC_EXPERIMENTAL_PF_WEIGHTED_SHUFFLING to "false". PickFirstWeightedShuffling = boolFromEnv("GRPC_EXPERIMENTAL_PF_WEIGHTED_SHUFFLING", true) + + // XDSRecoverPanicInResourceParsing indicates whether the xdsclient should + // recover from panics while parsing xDS resources. + // + // This feature can be disabled (e.g. for fuzz testing) by setting the + // environment variable "GRPC_GO_EXPERIMENTAL_XDS_RESOURCE_PANIC_RECOVERY" + // to "false". 
+ XDSRecoverPanicInResourceParsing = boolFromEnv("GRPC_GO_EXPERIMENTAL_XDS_RESOURCE_PANIC_RECOVERY", true) + + // DisableStrictPathChecking indicates whether strict path checking is + // disabled. This feature can be disabled by setting the environment + // variable GRPC_GO_EXPERIMENTAL_DISABLE_STRICT_PATH_CHECKING to "true". + // + // When strict path checking is enabled, gRPC will reject requests with + // paths that do not conform to the gRPC over HTTP/2 specification found at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md. + // + // When disabled, gRPC will allow paths that do not contain a leading slash. + // Enabling strict path checking is recommended for security reasons, as it + // prevents potential path traversal vulnerabilities. + // + // A future release will remove this environment variable, enabling strict + // path checking behavior unconditionally. + DisableStrictPathChecking = boolFromEnv("GRPC_GO_EXPERIMENTAL_DISABLE_STRICT_PATH_CHECKING", false) + + // EnablePriorityLBChildPolicyCache controls whether the priority balancer + // should cache child balancers that are removed from the LB policy config, + // for a period of 15 minutes. This is disabled by default, but can be + // enabled by setting the env variable + // GRPC_EXPERIMENTAL_ENABLE_PRIORITY_LB_CHILD_POLICY_CACHE to true. + EnablePriorityLBChildPolicyCache = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_PRIORITY_LB_CHILD_POLICY_CACHE", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/mem/buffer_pool.go b/vendor/google.golang.org/grpc/internal/mem/buffer_pool.go new file mode 100644 index 000000000..c2348a82e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/mem/buffer_pool.go @@ -0,0 +1,338 @@ +/* + * + * Copyright 2026 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +package mem + +import ( + "fmt" + "math/bits" + "slices" + "sort" + "sync" +) + +const ( + goPageSize = 4 * 1024 // 4KiB. N.B. this must be a power of 2. +) + +var uintSize = bits.UintSize // use a variable for mocking during tests. + +// bufferPool is a copy of the public bufferPool interface used to avoid +// circular dependencies. +type bufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. + Put(*[]byte) +} + +// BinaryTieredBufferPool is a buffer pool that uses multiple sub-pools with +// power-of-two sizes. +type BinaryTieredBufferPool struct { + // exponentToNextLargestPoolMap maps a power-of-two exponent (e.g., 12 for + // 4KB) to the index of the next largest sizedBufferPool. This is used by + // Get() to find the smallest pool that can satisfy a request for a given + // size. + exponentToNextLargestPoolMap []int + // exponentToPreviousLargestPoolMap maps a power-of-two exponent to the + // index of the previous largest sizedBufferPool. This is used by Put() + // to return a buffer to the most appropriate pool based on its capacity. 
+ exponentToPreviousLargestPoolMap []int + sizedPools []bufferPool + fallbackPool bufferPool + maxPoolCap int // Optimization: Cache max capacity +} + +// NewBinaryTieredBufferPool returns a BufferPool backed by multiple sub-pools. +// This structure enables O(1) lookup time for Get and Put operations. +// +// The arguments provided are the exponents for the buffer capacities (powers +// of 2), not the raw byte sizes. For example, to create a pool of 16KB buffers +// (2^14 bytes), pass 14 as the argument. +func NewBinaryTieredBufferPool(powerOfTwoExponents ...uint8) (*BinaryTieredBufferPool, error) { + return newBinaryTiered(func(size int) bufferPool { + return newSizedBufferPool(size, true) + }, &simpleBufferPool{shouldZero: true}, powerOfTwoExponents...) +} + +// NewDirtyBinaryTieredBufferPool returns a BufferPool backed by multiple +// sub-pools. It is similar to NewBinaryTieredBufferPool but it does not +// initialize the buffers before returning them. +func NewDirtyBinaryTieredBufferPool(powerOfTwoExponents ...uint8) (*BinaryTieredBufferPool, error) { + return newBinaryTiered(func(size int) bufferPool { + return newSizedBufferPool(size, false) + }, &simpleBufferPool{shouldZero: false}, powerOfTwoExponents...) +} + +func newBinaryTiered(sizedPoolFactory func(int) bufferPool, fallbackPool bufferPool, powerOfTwoExponents ...uint8) (*BinaryTieredBufferPool, error) { + slices.Sort(powerOfTwoExponents) + powerOfTwoExponents = slices.Compact(powerOfTwoExponents) + + // Determine the maximum exponent we need to support. This depends on the + // word size (32-bit vs 64-bit). + maxExponent := uintSize - 2 + indexOfNextLargestBit := slices.Repeat([]int{-1}, maxExponent+1) + indexOfPreviousLargestBit := slices.Repeat([]int{-1}, maxExponent+1) + + maxTier := 0 + pools := make([]bufferPool, 0, len(powerOfTwoExponents)) + + for i, exp := range powerOfTwoExponents { + // Allocating slices of size > 2^maxExponent isn't possible on + // maxExponent-bit machines. 
+ if int(exp) > maxExponent { + return nil, fmt.Errorf("mem: allocating slice of size 2^%d is not possible", exp) + } + tierSize := 1 << exp + pools = append(pools, sizedPoolFactory(tierSize)) + maxTier = max(maxTier, tierSize) + + // Map the exact power of 2 to this pool index. + indexOfNextLargestBit[exp] = i + indexOfPreviousLargestBit[exp] = i + } + + // Fill gaps for Get() (Next Largest) + // We iterate backwards. If current is empty, take the value from the right (larger). + for i := maxExponent - 1; i >= 0; i-- { + if indexOfNextLargestBit[i] == -1 { + indexOfNextLargestBit[i] = indexOfNextLargestBit[i+1] + } + } + + // Fill gaps for Put() (Previous Largest) + // We iterate forwards. If current is empty, take the value from the left (smaller). + for i := 1; i <= maxExponent; i++ { + if indexOfPreviousLargestBit[i] == -1 { + indexOfPreviousLargestBit[i] = indexOfPreviousLargestBit[i-1] + } + } + + return &BinaryTieredBufferPool{ + exponentToNextLargestPoolMap: indexOfNextLargestBit, + exponentToPreviousLargestPoolMap: indexOfPreviousLargestBit, + sizedPools: pools, + maxPoolCap: maxTier, + fallbackPool: fallbackPool, + }, nil +} + +// Get returns a buffer with specified length from the pool. +func (b *BinaryTieredBufferPool) Get(size int) *[]byte { + return b.poolForGet(size).Get(size) +} + +func (b *BinaryTieredBufferPool) poolForGet(size int) bufferPool { + if size == 0 || size > b.maxPoolCap { + return b.fallbackPool + } + + // Calculate the exponent of the smallest power of 2 >= size. + // We subtract 1 from size to handle exact powers of 2 correctly. + // + // Examples: + // size=16 (0b10000) -> size-1=15 (0b01111) -> bits.Len=4 -> Pool for 2^4 + // size=17 (0b10001) -> size-1=16 (0b10000) -> bits.Len=5 -> Pool for 2^5 + querySize := uint(size - 1) + poolIdx := b.exponentToNextLargestPoolMap[bits.Len(querySize)] + + return b.sizedPools[poolIdx] +} + +// Put returns a buffer to the pool. 
+func (b *BinaryTieredBufferPool) Put(buf *[]byte) { + // We pass the capacity of the buffer, and not the size of the buffer here. + // If we did the latter, all buffers would eventually move to the smallest + // pool. + b.poolForPut(cap(*buf)).Put(buf) +} + +func (b *BinaryTieredBufferPool) poolForPut(bCap int) bufferPool { + if bCap == 0 { + return NopBufferPool{} + } + if bCap > b.maxPoolCap { + return b.fallbackPool + } + // Find the pool with the largest capacity <= bCap. + // + // We calculate the exponent of the largest power of 2 <= bCap. + // bits.Len(x) returns the minimum number of bits required to represent x; + // i.e. the number of bits up to and including the most significant bit. + // Subtracting 1 gives the 0-based index of the most significant bit, + // which is the exponent of the largest power of 2 <= bCap. + // + // Examples: + // cap=16 (0b10000) -> Len=5 -> 5-1=4 -> 2^4 + // cap=15 (0b01111) -> Len=4 -> 4-1=3 -> 2^3 + largestPowerOfTwo := bits.Len(uint(bCap)) - 1 + poolIdx := b.exponentToPreviousLargestPoolMap[largestPowerOfTwo] + // The buffer is smaller than the smallest power of 2, discard it. + if poolIdx == -1 { + // Buffer is smaller than our smallest pool bucket. + return NopBufferPool{} + } + return b.sizedPools[poolIdx] +} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. 
Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a TieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int + shouldZero bool +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } + b := *buf + if p.shouldZero { + clear(b[:cap(b)]) + } + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int, zero bool) *sizedBufferPool { + return &sizedBufferPool{ + defaultSize: size, + shouldZero: zero, + } +} + +// TieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type TieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) *TieredBufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s, true) + } + return &TieredBufferPool{ + sizedPools: pools, + fallbackPool: simpleBufferPool{shouldZero: true}, + } +} + +// Get returns a buffer with specified length from the pool. 
+func (p *TieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +// Put returns a buffer to the pool. +func (p *TieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *TieredBufferPool) getPool(size int) bufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool + shouldZero bool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + if p.shouldZero { + clear((*bs)[:cap(*bs)]) + } + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + // If we're going to allocate, round up to the nearest page. This way if + // requests frequently arrive with small variation we don't allocate + // repeatedly if we get unlucky and they increase over time. By default we + // only allocate here if size > 1MiB. Because goPageSize is a power of 2, we + // can round up efficiently. 
+ allocSize := (size + goPageSize - 1) & ^(goPageSize - 1) + + b := make([]byte, size, allocSize) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index 980452519..cd8152ef1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -24,6 +24,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -46,10 +47,11 @@ type ClientStream struct { // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). headerValid bool - noHeaders bool // set if the client never received headers (set only after the stream is done). - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + statsHandler stats.Handler // nil for internal streams (e.g., health check, ORCA) where telemetry is not supported. } // Read reads an n byte message from the input stream. 
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index bc8ee0747..0b2269a50 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -46,6 +46,7 @@ const ( defaultWriteQuota = 64 * 1024 defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) + upcomingDefaultHeaderListSize = uint32(8 << 10) ) // MaxStreamID is the upper bound for the stream ID before the current diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 38ca031af..c943503f3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -478,7 +478,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, handler stats.Handler) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &ClientStream{ Stream: Stream{ @@ -486,10 +486,11 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt sendCompress: callHdr.SendCompress, contentSubtype: callHdr.ContentSubtype, }, - ct: t, - done: make(chan struct{}), - headerChan: make(chan struct{}), - doneFunc: callHdr.DoneFunc, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, + statsHandler: handler, } s.Stream.buf.init() s.Stream.wq.init(defaultWriteQuota, s.done) @@ -744,7 +745,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. 
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr, handler stats.Handler) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.Peer()) // ServerName field of the resolver returned address takes precedence over @@ -781,7 +782,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } - s := t.newStream(ctx, callHdr) + s := t.newStream(ctx, callHdr, handler) cleanup := func(err error) { if s.swapState(streamDone) == streamDone { // If it was already done, return. @@ -870,11 +871,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS } var sz int64 for _, f := range hdr.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + sz += int64(f.Size()) + if sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false } } + if sz > int64(upcomingDefaultHeaderListSize) { + t.logger.Warningf("Header list size to send (%d bytes) is larger than the upcoming default limit (%d bytes). In a future release, this will be restricted to %d bytes.", sz, upcomingDefaultHeaderListSize, upcomingDefaultHeaderListSize) + } return true } for { @@ -902,7 +907,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if s.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) @@ -911,7 +916,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS } // Note: The header fields are compressed with hpack after this call returns. 
// No WireLength field is set here. - t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + s.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, @@ -1587,16 +1592,16 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if t.statsHandler != nil { + if s.statsHandler != nil { if !endStream { - t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{ + s.statsHandler.HandleRPC(s.ctx, &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, }) } else { - t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{ + s.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index a1a14e14f..3a8c36e4f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -940,13 +940,17 @@ func (t *http2Server) checkForHeaderListSize(hf []hpack.HeaderField) bool { } var sz int64 for _, f := range hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + sz += int64(f.Size()) + if sz > int64(*t.maxSendHeaderListSize) { if t.logger.V(logLevel) { t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } } + if sz > int64(upcomingDefaultHeaderListSize) { + t.logger.Warningf("Header list size to send (%d bytes) is larger than the upcoming default limit (%d bytes). 
In a future release, this will be restricted to %d bytes.", sz, upcomingDefaultHeaderListSize, upcomingDefaultHeaderListSize) + } return true } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 10b9155f0..b86094da9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -617,7 +617,7 @@ type ClientTransport interface { GracefulClose() // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) + NewStream(ctx context.Context, callHdr *CallHdr, handler stats.Handler) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index 2ea763a49..3b02b9091 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -19,10 +19,10 @@ package mem import ( - "sort" - "sync" + "fmt" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/mem" ) // BufferPool is a pool of buffers that can be shared and reused, resulting in @@ -38,20 +38,23 @@ type BufferPool interface { Put(*[]byte) } -const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2. 
- -var defaultBufferPoolSizes = []int{ - 256, - goPageSize, - 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) - 32 << 10, // 32KB (default buffer size for io.Copy) - 1 << 20, // 1MB -} - -var defaultBufferPool BufferPool +var ( + defaultBufferPoolSizeExponents = []uint8{ + 8, + 12, // Go page size, 4KB + 14, // 16KB (max HTTP/2 frame size used by gRPC) + 15, // 32KB (default buffer size for io.Copy) + 20, // 1MB + } + defaultBufferPool BufferPool +) func init() { - defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + var err error + defaultBufferPool, err = NewBinaryTieredBufferPool(defaultBufferPoolSizeExponents...) + if err != nil { + panic(fmt.Sprintf("Failed to create default buffer pool: %v", err)) + } internal.SetDefaultBufferPool = func(pool BufferPool) { defaultBufferPool = pool @@ -72,134 +75,22 @@ func DefaultBufferPool() BufferPool { // NewTieredBufferPool returns a BufferPool implementation that uses multiple // underlying pools of the given pool sizes. func NewTieredBufferPool(poolSizes ...int) BufferPool { - sort.Ints(poolSizes) - pools := make([]*sizedBufferPool, len(poolSizes)) - for i, s := range poolSizes { - pools[i] = newSizedBufferPool(s) - } - return &tieredBufferPool{ - sizedPools: pools, - } -} - -// tieredBufferPool implements the BufferPool interface with multiple tiers of -// buffer pools for different sizes of buffers. -type tieredBufferPool struct { - sizedPools []*sizedBufferPool - fallbackPool simpleBufferPool -} - -func (p *tieredBufferPool) Get(size int) *[]byte { - return p.getPool(size).Get(size) + return mem.NewTieredBufferPool(poolSizes...) } -func (p *tieredBufferPool) Put(buf *[]byte) { - p.getPool(cap(*buf)).Put(buf) +// NewBinaryTieredBufferPool returns a BufferPool backed by multiple sub-pools. +// This structure enables O(1) lookup time for Get and Put operations. +// +// The arguments provided are the exponents for the buffer capacities (powers +// of 2), not the raw byte sizes. 
For example, to create a pool of 16KB buffers +// (2^14 bytes), pass 14 as the argument. +func NewBinaryTieredBufferPool(powerOfTwoExponents ...uint8) (BufferPool, error) { + return mem.NewBinaryTieredBufferPool(powerOfTwoExponents...) } -func (p *tieredBufferPool) getPool(size int) BufferPool { - poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { - return p.sizedPools[i].defaultSize >= size - }) - - if poolIdx == len(p.sizedPools) { - return &p.fallbackPool - } - - return p.sizedPools[poolIdx] -} - -// sizedBufferPool is a BufferPool implementation that is optimized for specific -// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size -// of 16kb and a sizedBufferPool can be configured to only return buffers with a -// capacity of 16kb. Note that however it does not support returning larger -// buffers and in fact panics if such a buffer is requested. Because of this, -// this BufferPool implementation is not meant to be used on its own and rather -// is intended to be embedded in a tieredBufferPool such that Get is only -// invoked when the required size is smaller than or equal to defaultSize. -type sizedBufferPool struct { - pool sync.Pool - defaultSize int -} - -func (p *sizedBufferPool) Get(size int) *[]byte { - buf, ok := p.pool.Get().(*[]byte) - if !ok { - buf := make([]byte, size, p.defaultSize) - return &buf - } - b := *buf - clear(b[:cap(b)]) - *buf = b[:size] - return buf -} - -func (p *sizedBufferPool) Put(buf *[]byte) { - if cap(*buf) < p.defaultSize { - // Ignore buffers that are too small to fit in the pool. Otherwise, when - // Get is called it will panic as it tries to index outside the bounds - // of the buffer. 
- return - } - p.pool.Put(buf) -} - -func newSizedBufferPool(size int) *sizedBufferPool { - return &sizedBufferPool{ - defaultSize: size, - } -} - -var _ BufferPool = (*simpleBufferPool)(nil) - -// simpleBufferPool is an implementation of the BufferPool interface that -// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to -// acquire a buffer from the pool but if that buffer is too small, it returns it -// to the pool and creates a new one. -type simpleBufferPool struct { - pool sync.Pool -} - -func (p *simpleBufferPool) Get(size int) *[]byte { - bs, ok := p.pool.Get().(*[]byte) - if ok && cap(*bs) >= size { - clear((*bs)[:cap(*bs)]) - *bs = (*bs)[:size] - return bs - } - - // A buffer was pulled from the pool, but it is too small. Put it back in - // the pool and create one large enough. - if ok { - p.pool.Put(bs) - } - - // If we're going to allocate, round up to the nearest page. This way if - // requests frequently arrive with small variation we don't allocate - // repeatedly if we get unlucky and they increase over time. By default we - // only allocate here if size > 1MiB. Because goPageSize is a power of 2, we - // can round up efficiently. - allocSize := (size + goPageSize - 1) & ^(goPageSize - 1) - - b := make([]byte, size, allocSize) - return &b -} - -func (p *simpleBufferPool) Put(buf *[]byte) { - p.pool.Put(buf) -} - -var _ BufferPool = NopBufferPool{} - // NopBufferPool is a buffer pool that returns new buffers without pooling. -type NopBufferPool struct{} - -// Get returns a buffer with specified length from the pool. -func (NopBufferPool) Get(length int) *[]byte { - b := make([]byte, length) - return &b +type NopBufferPool struct { + mem.NopBufferPool } -// Put returns a buffer to the pool. 
-func (NopBufferPool) Put(*[]byte) { -} +var _ BufferPool = NopBufferPool{} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index aa52bfe95..0183ab22f 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -192,7 +192,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } - logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + if logger.V(2) { + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + } // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index c3c15ac96..789a5abab 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,6 +20,7 @@ package resolver import ( "encoding/base64" + "iter" "sort" "strings" ) @@ -135,6 +136,7 @@ func (a *AddressMapV2[T]) Len() int { } // Keys returns a slice of all current map keys. +// Deprecated: Use AddressMapV2.All() instead. func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { @@ -146,6 +148,7 @@ func (a *AddressMapV2[T]) Keys() []Address { } // Values returns a slice of all current map values. +// Deprecated: Use AddressMapV2.All() instead. func (a *AddressMapV2[T]) Values() []T { ret := make([]T, 0, a.Len()) for _, entryList := range a.m { @@ -156,6 +159,19 @@ func (a *AddressMapV2[T]) Values() []T { return ret } +// All returns an iterator over all elements. 
+func (a *AddressMapV2[T]) All() iter.Seq2[Address, T] { + return func(yield func(Address, T) bool) { + for _, entryList := range a.m { + for _, entry := range entryList { + if !yield(entry.addr, entry.value) { + return + } + } + } + } +} + type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the @@ -223,6 +239,7 @@ func (em *EndpointMap[T]) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. +// Deprecated: Use EndpointMap.All() instead. func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) for _, en := range em.endpoints { @@ -232,6 +249,7 @@ func (em *EndpointMap[T]) Keys() []Endpoint { } // Values returns a slice of all current map values. +// Deprecated: Use EndpointMap.All() instead. func (em *EndpointMap[T]) Values() []T { ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { @@ -240,6 +258,22 @@ func (em *EndpointMap[T]) Values() []T { return ret } +// All returns an iterator over all elements. +// The map keys are endpoints specifying the addresses present in the endpoint +// map, in which uniqueness is determined by the unordered set of addresses. +// Thus, endpoint information returned is not the full endpoint data (drops +// duplicated addresses and attributes) but can be used for EndpointMap +// accesses. +func (em *EndpointMap[T]) All() iter.Seq2[Endpoint, T] { + return func(yield func(Endpoint, T) bool) { + for _, en := range em.endpoints { + if !yield(en.decodedKey, en.value) { + return + } + } + } +} + // Delete removes the specified endpoint from the map. 
func (em *EndpointMap[T]) Delete(e Endpoint) { en := encodeEndpoint(e) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 8160f9430..ee7f7dead 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -961,24 +961,32 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM return out, nil } -// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor. -// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data -// does not exceed the specified maximum size and returns an error if this limit is exceeded. -// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit. +// decompress processes the given data by decompressing it using either +// a custom decompressor or a standard compressor. If a custom decompressor +// is provided, it takes precedence. The function validates that +// the decompressed data does not exceed the specified maximum size and returns +// an error if this limit is exceeded. On success, it returns the decompressed +// data. Otherwise, it returns an error if decompression fails or the data +// exceeds the size limit. func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) { if dc != nil { - uncompressed, err := dc.Do(d.Reader()) + r := d.Reader() + uncompressed, err := dc.Do(r) if err != nil { + r.Close() // ensure buffers are reused return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if len(uncompressed) > maxReceiveMessageSize { + r.Close() // ensure buffers are reused return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. 
%d)", len(uncompressed), maxReceiveMessageSize) } return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil } if compressor != nil { - dcReader, err := compressor.Decompress(d.Reader()) + r := d.Reader() + dcReader, err := compressor.Decompress(r) if err != nil { + r.Close() // ensure buffers are reused return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) } @@ -990,11 +998,13 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress } out, err := mem.ReadAll(dcReader, pool) if err != nil { + r.Close() // ensure buffers are reused out.Free() return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) } if out.Len() > maxReceiveMessageSize { + r.Close() // ensure buffers are reused out.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 1b5cefe81..5229adf71 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" istats "google.golang.org/grpc/internal/stats" @@ -149,6 +150,8 @@ type Server struct { serverWorkerChannel chan func() serverWorkerChannelClose func() + + strictPathCheckingLogEmitted atomic.Bool } type serverOptions struct { @@ -189,6 +192,7 @@ var defaultServerOptions = serverOptions{ maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, + sharedWriteBuffer: true, readBufferSize: defaultReadBufSize, bufferPool: mem.DefaultBufferPool(), } @@ -1762,6 +1766,24 @@ func (s 
*Server) processStreamingRPC(ctx context.Context, stream *transport.Serv return ss.s.WriteStatus(statusOK) } +func (s *Server) handleMalformedMethodName(stream *transport.ServerStream, ti *traceInfo) { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{stream.Method()}}, true) + ti.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() + } + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) + } + if ti != nil { + ti.tr.Finish() + } +} + func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) @@ -1782,26 +1804,30 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser } sm := stream.Method() - if sm != "" && sm[0] == '/' { + if sm == "" { + s.handleMalformedMethodName(stream, ti) + return + } + if sm[0] != '/' { + // TODO(easwars): Add a link to the CVE in the below log messages once + // published. + if envconfig.DisableStrictPathChecking { + if old := s.strictPathCheckingLogEmitted.Swap(true); !old { + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream received malformed method name %q. Allowing it because the environment variable GRPC_GO_EXPERIMENTAL_DISABLE_STRICT_PATH_CHECKING is set to true, but this option will be removed in a future release.", sm) + } + } else { + if old := s.strictPathCheckingLogEmitted.Swap(true); !old { + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream rejected malformed method name %q. To temporarily allow such requests, set the environment variable GRPC_GO_EXPERIMENTAL_DISABLE_STRICT_PATH_CHECKING to true. 
Note that this is not recommended as it may allow requests to bypass security policies.", sm) + } + s.handleMalformedMethodName(stream, ti) + return + } + } else { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - ti.tr.SetError() - } - errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ti.tr.SetError() - } - channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) - } - if ti != nil { - ti.tr.Finish() - } + s.handleMalformedMethodName(stream, ti) return } service := sm[:pos] diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index f92102fb4..eedb5f9b9 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -548,7 +548,7 @@ func (a *csAttempt) newStream() error { } } } - s, err := a.transport.NewStream(a.ctx, cs.callHdr) + s, err := a.transport.NewStream(a.ctx, cs.callHdr, a.statsHandler) if err != nil { nse, ok := err.(*transport.NewStreamError) if !ok { @@ -1354,7 +1354,8 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin transport: t, } - s, err := as.transport.NewStream(as.ctx, as.callHdr) + // nil stats handler: internal streams like health and ORCA do not support telemetry. + s, err := as.transport.NewStream(as.ctx, as.callHdr, nil) if err != nil { err = toRPCErr(err) return nil, err diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index c1225b910..12f649dcb 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.79.1" +const Version = "1.80.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 81c2f72ab..ebc89741d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -29,11 +29,12 @@ github.com/cespare/xxhash/v2 # github.com/chzyer/readline v1.5.1 ## explicit; go 1.15 github.com/chzyer/readline -# github.com/cilium/ebpf v0.20.0 +# github.com/cilium/ebpf v0.21.0 ## explicit; go 1.24.0 github.com/cilium/ebpf github.com/cilium/ebpf/asm github.com/cilium/ebpf/btf +github.com/cilium/ebpf/features github.com/cilium/ebpf/internal github.com/cilium/ebpf/internal/efw github.com/cilium/ebpf/internal/epoll @@ -121,8 +122,8 @@ github.com/davecgh/go-spew/spew # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous -# github.com/distribution/distribution/v3 v3.0.0 -## explicit; go 1.23.7 +# github.com/distribution/distribution/v3 v3.1.0 +## explicit; go 1.25.0 github.com/distribution/distribution/v3 github.com/distribution/distribution/v3/configuration github.com/distribution/distribution/v3/health @@ -133,6 +134,7 @@ github.com/distribution/distribution/v3/internal/client/auth/challenge github.com/distribution/distribution/v3/internal/client/transport github.com/distribution/distribution/v3/internal/dcontext github.com/distribution/distribution/v3/internal/requestutil +github.com/distribution/distribution/v3/internal/uuid github.com/distribution/distribution/v3/manifest github.com/distribution/distribution/v3/manifest/manifestlist github.com/distribution/distribution/v3/manifest/ocischema @@ -204,7 +206,7 @@ github.com/felixge/httpsnoop ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/fxamacker/cbor/v2 v2.9.0 +# github.com/fxamacker/cbor/v2 v2.9.1 ## explicit; go 1.20 github.com/fxamacker/cbor/v2 # github.com/go-errors/errors v1.5.1 @@ -221,50 +223,50 @@ github.com/go-logr/stdr # github.com/go-logr/zapr v1.3.0 ## explicit; go 1.18 
github.com/go-logr/zapr -# github.com/go-openapi/jsonpointer v0.22.4 +# github.com/go-openapi/jsonpointer v0.22.5 ## explicit; go 1.24.0 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.21.4 +# github.com/go-openapi/jsonreference v0.21.5 ## explicit; go 1.24.0 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.25.4 +# github.com/go-openapi/swag v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag -# github.com/go-openapi/swag/cmdutils v0.25.4 +# github.com/go-openapi/swag/cmdutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/cmdutils -# github.com/go-openapi/swag/conv v0.25.4 +# github.com/go-openapi/swag/conv v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/conv -# github.com/go-openapi/swag/fileutils v0.25.4 +# github.com/go-openapi/swag/fileutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/fileutils -# github.com/go-openapi/swag/jsonname v0.25.4 +# github.com/go-openapi/swag/jsonname v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/jsonname -# github.com/go-openapi/swag/jsonutils v0.25.4 +# github.com/go-openapi/swag/jsonutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/jsonutils github.com/go-openapi/swag/jsonutils/adapters github.com/go-openapi/swag/jsonutils/adapters/ifaces github.com/go-openapi/swag/jsonutils/adapters/stdlib/json -# github.com/go-openapi/swag/loading v0.25.4 +# github.com/go-openapi/swag/loading v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/loading -# github.com/go-openapi/swag/mangling v0.25.4 +# github.com/go-openapi/swag/mangling v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/mangling -# github.com/go-openapi/swag/netutils v0.25.4 +# github.com/go-openapi/swag/netutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/netutils -# github.com/go-openapi/swag/stringutils v0.25.4 +# github.com/go-openapi/swag/stringutils v0.25.5 ## explicit; go 1.24.0 
github.com/go-openapi/swag/stringutils -# github.com/go-openapi/swag/typeutils v0.25.4 +# github.com/go-openapi/swag/typeutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/typeutils -# github.com/go-openapi/swag/yamlutils v0.25.4 +# github.com/go-openapi/swag/yamlutils v0.25.5 ## explicit; go 1.24.0 github.com/go-openapi/swag/yamlutils # github.com/go-task/slim-sprig/v3 v3.0.0 @@ -277,7 +279,7 @@ github.com/gogo/protobuf/sortkeys # github.com/google/btree v1.1.3 ## explicit; go 1.18 github.com/google/btree -# github.com/google/cel-go v0.27.0 +# github.com/google/cel-go v0.28.0 ## explicit; go 1.23.0 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -316,10 +318,10 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.21.1 -## explicit; go 1.25.6 +# github.com/google/go-containerregistry v0.21.5 +## explicit; go 1.25.0 github.com/google/go-containerregistry/pkg/name -# github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef +# github.com/google/pprof v0.0.0-20260402051712-545e8a4df936 ## explicit; go 1.24.0 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 @@ -350,8 +352,8 @@ github.com/inconshreveable/mousetrap # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.18.4 -## explicit; go 1.23 +# github.com/klauspost/compress v1.18.5 +## explicit; go 1.24 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -522,8 +524,8 @@ github.com/openshift/library-go/pkg/operator/resource/resourcehelper github.com/openshift/library-go/pkg/operator/resource/resourcemerge github.com/openshift/library-go/pkg/operator/resource/resourceread github.com/openshift/library-go/pkg/operator/v1helpers -# github.com/panjf2000/ants/v2 v2.11.5 -## explicit; go 1.18 
+# github.com/panjf2000/ants/v2 v2.12.0 +## explicit; go 1.19 github.com/panjf2000/ants/v2 github.com/panjf2000/ants/v2/pkg/sync # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 @@ -555,7 +557,7 @@ github.com/prometheus/common/model # github.com/prometheus/otlptranslator v1.0.0 ## explicit; go 1.23.0 github.com/prometheus/otlptranslator -# github.com/prometheus/procfs v0.20.0 +# github.com/prometheus/procfs v0.20.1 ## explicit; go 1.25.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -590,10 +592,10 @@ github.com/secure-systems-lab/go-securesystemslib/encrypted # github.com/sigstore/fulcio v1.8.5 ## explicit; go 1.25.0 github.com/sigstore/fulcio/pkg/certificate -# github.com/sigstore/protobuf-specs v0.5.0 -## explicit; go 1.22.0 +# github.com/sigstore/protobuf-specs v0.5.1 +## explicit; go 1.23 github.com/sigstore/protobuf-specs/gen/pb-go/common/v1 -# github.com/sigstore/sigstore v1.10.4 +# github.com/sigstore/sigstore v1.10.5 ## explicit; go 1.25.0 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature @@ -618,19 +620,19 @@ github.com/xlab/treeprint ## explicit; go 1.24.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/contrib/bridges/prometheus v0.68.0 +## explicit; go 1.25.0 go.opentelemetry.io/contrib/bridges/prometheus -# go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/contrib/exporters/autoexport v0.68.0 +## explicit; go 1.25.0 go.opentelemetry.io/contrib/exporters/autoexport -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 +## explicit; go 1.25.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -# go.opentelemetry.io/otel v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/attribute/internal @@ -638,54 +640,55 @@ go.opentelemetry.io/otel/attribute/internal/xxhash go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/errorhandler go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.37.0 -go.opentelemetry.io/otel/semconv/v1.39.0 -go.opentelemetry.io/otel/semconv/v1.39.0/httpconv -go.opentelemetry.io/otel/semconv/v1.39.0/otelconv go.opentelemetry.io/otel/semconv/v1.4.0 -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 -## explicit; go 1.24.0 +go.opentelemetry.io/otel/semconv/v1.40.0 +go.opentelemetry.io/otel/semconv/v1.40.0/httpconv +go.opentelemetry.io/otel/semconv/v1.40.0/otelconv +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.19.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.19.0 +## explicit; go 1.25.0 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/x -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter @@ -694,8 +697,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter @@ -704,42 +707,42 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x -# go.opentelemetry.io/otel/exporters/prometheus v0.62.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/prometheus v0.65.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/prometheus go.opentelemetry.io/otel/exporters/prometheus/internal go.opentelemetry.io/otel/exporters/prometheus/internal/counter go.opentelemetry.io/otel/exporters/prometheus/internal/observ 
go.opentelemetry.io/otel/exporters/prometheus/internal/x -# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/stdout/stdoutlog -# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/counter go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/observ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/internal/x -# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x -# go.opentelemetry.io/otel/log v0.16.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/log v0.19.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/log go.opentelemetry.io/otel/log/embedded go.opentelemetry.io/otel/log/noop -# go.opentelemetry.io/otel/metric v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/metric v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation 
go.opentelemetry.io/otel/sdk/internal/x @@ -748,13 +751,13 @@ go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/internal/env go.opentelemetry.io/otel/sdk/trace/internal/observ go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/log v0.16.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk/log v0.19.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk/log go.opentelemetry.io/otel/sdk/log/internal/observ go.opentelemetry.io/otel/sdk/log/internal/x -# go.opentelemetry.io/otel/sdk/metric v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk/metric v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar go.opentelemetry.io/otel/sdk/metric/internal @@ -762,14 +765,14 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/observ go.opentelemetry.io/otel/sdk/metric/internal/reservoir go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/trace v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.9.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/proto/otlp v1.10.0 +## explicit; go 1.24.0 go.opentelemetry.io/proto/otlp/collector/logs/v1 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -795,14 +798,14 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# go.yaml.in/yaml/v2 v2.4.3 +# go.yaml.in/yaml/v2 v2.4.4 ## explicit; go 1.15 go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.48.0 -## explicit; go 1.24.0 +# golang.org/x/crypto v0.50.0 +## explicit; go 1.25.0 
golang.org/x/crypto/acme golang.org/x/crypto/acme/autocert golang.org/x/crypto/bcrypt @@ -820,13 +823,13 @@ golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt -# golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa +# golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f ## explicit; go 1.25.0 golang.org/x/exp/slices -# golang.org/x/mod v0.33.0 -## explicit; go 1.24.0 +# golang.org/x/mod v0.35.0 +## explicit; go 1.25.0 golang.org/x/mod/semver -# golang.org/x/net v0.51.0 +# golang.org/x/net v0.53.0 ## explicit; go 1.25.0 golang.org/x/net/html golang.org/x/net/html/atom @@ -839,28 +842,29 @@ golang.org/x/net/idna golang.org/x/net/internal/httpcommon golang.org/x/net/internal/httpsfv golang.org/x/net/internal/timeseries +golang.org/x/net/publicsuffix golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.35.0 -## explicit; go 1.24.0 +# golang.org/x/oauth2 v0.36.0 +## explicit; go 1.25.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.19.0 -## explicit; go 1.24.0 +# golang.org/x/sync v0.20.0 +## explicit; go 1.25.0 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.41.0 -## explicit; go 1.24.0 +# golang.org/x/sys v0.43.0 +## explicit; go 1.25.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.40.0 -## explicit; go 1.24.0 +# golang.org/x/term v0.42.0 +## explicit; go 1.25.0 golang.org/x/term -# golang.org/x/text v0.34.0 -## explicit; go 1.24.0 +# golang.org/x/text v0.36.0 +## explicit; go 1.25.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -889,11 +893,11 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.14.0 -## explicit; go 1.24.0 +# golang.org/x/time v0.15.0 +## explicit; go 
1.25.0 golang.org/x/time/rate -# golang.org/x/tools v0.42.0 -## explicit; go 1.24.0 +# golang.org/x/tools v0.44.0 +## explicit; go 1.25.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector @@ -917,17 +921,17 @@ golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.5.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/genproto/googleapis/api v0.0.0-20260226221140-a57be14db171 +# google.golang.org/genproto/googleapis/api v0.0.0-20260414002931-afd174a4e478 ## explicit; go 1.25.0 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20260414002931-afd174a4e478 ## explicit; go 1.25.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.79.1 +# google.golang.org/grpc v1.80.0 ## explicit; go 1.24.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -967,6 +971,7 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle +google.golang.org/grpc/internal/mem google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/proxyattributes @@ -1607,7 +1612,7 @@ k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 +# k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f ## explicit; go 1.23.0 k8s.io/kube-openapi/pkg/cached 
k8s.io/kube-openapi/pkg/common