diff --git a/.chloggen/use_otlp_for_internal_metrics.yaml b/.chloggen/use_otlp_for_internal_metrics.yaml new file mode 100644 index 000000000..d674e857a --- /dev/null +++ b/.chloggen/use_otlp_for_internal_metrics.yaml @@ -0,0 +1,30 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: telemetry + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Export internal telemetry via OTLP exporter + +# One or more tracking issues or pull requests related to the change +issues: [836] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + The collector supports exporting internal telemetry via Prometheus or using OTLP. + The OTLP approach removes the need to bind a port for the Prometheus endpoint, and pushes + metrics instead of holding them until they are scraped. + The OTLP exporter associated with internal telemetry feeds data to the OTLP receiver of the collector. + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] + diff --git a/distributions/otelcol-contrib/config.yaml b/distributions/otelcol-contrib/config.yaml index 63aa9cb9f..7cda24bbe 100644 --- a/distributions/otelcol-contrib/config.yaml +++ b/distributions/otelcol-contrib/config.yaml @@ -16,15 +16,6 @@ receivers: http: endpoint: 0.0.0.0:4318 - # Collect own metrics - prometheus: - config: - scrape_configs: - - job_name: 'otel-collector' - scrape_interval: 10s - static_configs: - - targets: ['0.0.0.0:8888'] - jaeger: protocols: grpc: @@ -49,17 +40,14 @@ exporters: service: telemetry: metrics: + level: basic readers: - - pull: + - periodic: + interval: 10000 exporter: - prometheus: - host: '127.0.0.1' - port: 8888 - without_scope_info: true - without_type_suffix: true - without_units: true - with_resource_constant_labels: - included: [ ] + otlp: + protocol: grpc + endpoint: http://localhost:4317 pipelines: @@ -69,7 +57,7 @@ service: exporters: [debug] metrics: - receivers: [otlp, prometheus] + receivers: [otlp] processors: [batch] exporters: [debug] diff --git a/distributions/otelcol/config.yaml b/distributions/otelcol/config.yaml index 63aa9cb9f..7cda24bbe 100644 --- a/distributions/otelcol/config.yaml +++ b/distributions/otelcol/config.yaml @@ -16,15 +16,6 @@ receivers: http: endpoint: 0.0.0.0:4318 - # Collect own metrics - prometheus: - config: - scrape_configs: - - job_name: 'otel-collector' - scrape_interval: 10s - static_configs: - - targets: ['0.0.0.0:8888'] - jaeger: protocols: grpc: @@ -49,17 +40,14 @@ exporters: service: telemetry: metrics: + level: basic readers: - - pull: + - periodic: + interval: 10000 exporter: - prometheus: - host: '127.0.0.1' - port: 8888 - without_scope_info: true - without_type_suffix: true - without_units: true - with_resource_constant_labels: - included: [ ] + otlp: + protocol: grpc + endpoint: http://localhost:4317 pipelines: @@ -69,7 +57,7 @@ service: exporters: [debug] metrics: - receivers: [otlp, prometheus] + receivers: [otlp] processors: 
[batch] exporters: [debug] diff --git a/tests/golden/config.yaml b/tests/golden/config.yaml index 0fa969d77..6d658d21f 100644 --- a/tests/golden/config.yaml +++ b/tests/golden/config.yaml @@ -1,24 +1,8 @@ receivers: - # Collect own metrics - prometheus: - config: - scrape_configs: - - job_name: 'otel-collector' - scrape_interval: 2s - static_configs: - - targets: ['0.0.0.0:8888'] - - # Remove once https://github.com/open-telemetry/opentelemetry-collector/issues/14814 is fixed - metric_relabel_configs: - - source_labels: [service_name] - target_label: service.name - - source_labels: [service_instance_id] - target_label: service.instance.id - - source_labels: [service_version] - target_label: service.version - - regex: service_name|service_instance_id|service_version - action: labeldrop - + otlp: + protocols: + grpc: + http: exporters: debug: verbosity: detailed @@ -31,9 +15,19 @@ exporters: sending_queue: enabled: false service: + telemetry: + metrics: + level: basic + readers: + - periodic: + interval: 10000 + exporter: + otlp: + protocol: grpc + endpoint: http://localhost:4317 pipelines: metrics: receivers: - - prometheus + - otlp exporters: - otlp_grpc diff --git a/tests/golden/data/expected.yaml b/tests/golden/data/expected.yaml index d7d316205..676996fcd 100644 --- a/tests/golden/data/expected.yaml +++ b/tests/golden/data/expected.yaml @@ -1,167 +1,71 @@ resourceMetrics: - resource: attributes: - - key: server.port - value: - stringValue: "8888" - key: service.instance.id value: - stringValue: 5230d18c-f0dc-4ff3-9284-f273297bb543 + stringValue: 42fdc910-d740-4edc-9250-090292924727 - key: service.name value: stringValue: otelcol-contrib - key: service.version value: - stringValue: 0.139.0 - - key: url.scheme - value: - stringValue: http + stringValue: 0.142.0 + schemaUrl: https://opentelemetry.io/schemas/1.37.0 scopeMetrics: - metrics: - - description: Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [Alpha] - gauge: - dataPoints: - - 
asDouble: 2.0731296e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: otelcol_process_runtime_heap_alloc_bytes - - description: Total number of internal errors encountered by the promhttp metric handler. - metadata: - - key: prometheus.type - value: - stringValue: counter - name: promhttp_metric_handler_errors_total + - description: Total CPU user and system time in seconds [Alpha] + name: otelcol_process_cpu_seconds sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 - attributes: - - key: cause - value: - stringValue: encoding - startTimeUnixNano: "1000000" - timeUnixNano: "1000000" - - asDouble: 0 - attributes: - - key: cause - value: - stringValue: gathering + - asDouble: 0.16999999999999998 startTimeUnixNano: "1000000" - timeUnixNano: "1000000" + timeUnixNano: "2000000" isMonotonic: true + unit: s + - description: Total physical memory (resident set size) [Alpha] + gauge: + dataPoints: + - asInt: "166756352" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: otelcol_process_memory_rss + unit: By + - description: Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [Alpha] + gauge: + dataPoints: + - asInt: "19258312" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: otelcol_process_runtime_heap_alloc_bytes + unit: By - description: Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [Alpha] - metadata: - - key: prometheus.type - value: - stringValue: counter name: otelcol_process_runtime_total_alloc_bytes sum: aggregationTemporality: 2 dataPoints: - - asDouble: 3.2133528e+07 + - asInt: "31136352" startTimeUnixNano: "1000000" - timeUnixNano: "1000000" + timeUnixNano: "2000000" isMonotonic: true + unit: By - description: Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [Alpha] gauge: dataPoints: - - asDouble: 3.7312776e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - 
value: - stringValue: gauge + - asInt: "37312776" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: otelcol_process_runtime_total_sys_memory_bytes + unit: By - description: Uptime of the process [Alpha] - metadata: - - key: prometheus.type - value: - stringValue: counter name: otelcol_process_uptime sum: aggregationTemporality: 2 dataPoints: - - asDouble: 6.80132517 + - asDouble: 10.002603172 startTimeUnixNano: "1000000" - timeUnixNano: "1000000" + timeUnixNano: "2000000" isMonotonic: true - - description: The scraping was successful - gauge: - dataPoints: - - asDouble: 1 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: up - - description: Duration of the scrape - gauge: - dataPoints: - - asDouble: 0.008631292 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: scrape_duration_seconds unit: s - - description: The number of samples the target exposed - gauge: - dataPoints: - - asDouble: 9 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: scrape_samples_scraped - - description: The number of samples remaining after metric relabeling was applied - gauge: - dataPoints: - - asDouble: 9 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: scrape_samples_post_metric_relabeling - - description: The approximate number of new series in this scrape - gauge: - dataPoints: - - asDouble: 9 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: scrape_series_added - - description: Total CPU user and system time in seconds [Alpha] - metadata: - - key: prometheus.type - value: - stringValue: counter - name: otelcol_process_cpu_seconds - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0.2 - startTimeUnixNano: "1000000" - timeUnixNano: "1000000" - isMonotonic: true - - description: Total physical memory (resident set 
size) [Alpha] - gauge: - dataPoints: - - asDouble: 1.72855296e+08 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: gauge - name: otelcol_process_memory_rss scope: - name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver - version: 0.139.0 + name: go.opentelemetry.io/collector/service