# Configuration file that uses the Splunk exporters (OTLP HTTP, SignalFx) to push
# data to Splunk products.

extensions:
  headers_setter:
    headers:
      - action: upsert
        key: X-SF-TOKEN
        from_context: X-SF-TOKEN
        default_value: "${SPLUNK_ACCESS_TOKEN}"
  health_check:
    endpoint: "${SPLUNK_LISTEN_INTERFACE}:13133"
  http_forwarder:
    ingress:
      endpoint: "${SPLUNK_LISTEN_INTERFACE}:6060"
    egress:
      endpoint: "https://api.${SPLUNK_REALM}.signalfx.com"
  http_forwarder/signalfx:
    ingress:
      endpoint: "${SPLUNK_LISTEN_INTERFACE}:9943"
      # Whether to propagate the client metadata from the incoming requests to the backend.
      # Should be enabled to preserve incoming access token
      # include_metadata: true
    egress:
      endpoint: "https://ingest.${SPLUNK_REALM}.signalfx.com"
  zpages:
    endpoint: "${SPLUNK_LISTEN_INTERFACE}:55679"
    expvar:
      enabled: true

receivers:
  jaeger:
    protocols:
      grpc:
        endpoint: "${SPLUNK_LISTEN_INTERFACE}:14250"
      thrift_binary:
        endpoint: "${SPLUNK_LISTEN_INTERFACE}:6832"
      thrift_compact:
        endpoint: "${SPLUNK_LISTEN_INTERFACE}:6831"
      thrift_http:
        endpoint: "${SPLUNK_LISTEN_INTERFACE}:14268"
  otlp:
    protocols:
      grpc:
        endpoint: "${SPLUNK_LISTEN_INTERFACE}:4317"
        # Uncomment below config to preserve incoming access token and use it instead of the token value set in exporter config
        # include_metadata: true
      http:
        endpoint: "${SPLUNK_LISTEN_INTERFACE}:4318"
        # Uncomment below config to preserve incoming access token and use it instead of the token value set in exporter config
        # include_metadata: true
  # This section is used to collect the OpenTelemetry Collector metrics
  # Even if just a Splunk APM customer, these metrics are included
  prometheus/internal:
    config:
      scrape_configs:
        - job_name: 'otel-collector'
          scrape_interval: 10s
          static_configs:
            - targets: ['0.0.0.0:8888']
          metric_relabel_configs:
            - source_labels: [ __name__ ]
              regex: 'promhttp_metric_handler_errors.*'
              action: drop
            - source_labels: [ __name__ ]
              regex: 'otelcol_processor_batch_.*'
              action: drop
  zipkin:
    endpoint: "${SPLUNK_LISTEN_INTERFACE}:9411"

processors:
  batch:
    metadata_keys:
      - X-SF-Token
  # Enabling the memory_limiter is strongly recommended for every pipeline.
  # Configuration is based on the amount of memory allocated to the collector.
  # For more information about memory limiter, see
  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
  memory_limiter:
    check_interval: 2s
    limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}

  # Optional: The following processor can be used to add a default "deployment.environment" attribute to the traces
  # when it's not populated by instrumentation libraries.
  # If enabled, make sure to enable this processor in the pipeline below.
  #resource/add_environment:
    #attributes:
      #- action: insert
        #value: staging/production/...
        #key: deployment.environment

  # The following processor is used to add "otelcol.service.mode" attribute to the internal metrics
  resource/add_mode:
    attributes:
      - action: insert
        value: "gateway"
        key: otelcol.service.mode

  # Detect if the collector is running on a cloud system. Overrides resource attributes set by receivers.
  # Detector order is important: the `system` detector goes last so it can't preclude cloud detectors from setting host/os info.
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#ordering
  resourcedetection/internal:
    detectors: [gcp, ecs, ec2, azure, system]
    override: true

exporters:
  # Traces
  otlphttp:
    traces_endpoint: "${SPLUNK_INGEST_URL}/v2/trace/otlp"
    sending_queue:
      num_consumers: 32
    headers:
      "X-SF-Token": "${SPLUNK_ACCESS_TOKEN}"
    auth:
      authenticator: headers_setter
  # Metrics + Events
  signalfx:
    access_token: "${SPLUNK_ACCESS_TOKEN}"
    realm: "${SPLUNK_REALM}"
    sending_queue:
      num_consumers: 32
  signalfx/internal:
    access_token: "${SPLUNK_ACCESS_TOKEN}"
    realm: "${SPLUNK_REALM}"
    sync_host_metadata: true
  # Debug
  #debug:
    #verbosity: detailed
  # Logs
  splunk_hec:
    token: "${SPLUNK_HEC_TOKEN}"
    endpoint: "${SPLUNK_HEC_URL}"
    source: "otel"
    sourcetype: "otel"
    profiling_data_enabled: false
  # Profiling
  splunk_hec/profiling:
    token: "${SPLUNK_ACCESS_TOKEN}"
    endpoint: "${SPLUNK_INGEST_URL}/v1/log"
    log_data_enabled: false
  # To send entities (applicable only if discovery mode is enabled)
  otlphttp/entities:
    logs_endpoint: "${SPLUNK_INGEST_URL}/v3/event"
    headers:
      "X-SF-Token": "${SPLUNK_ACCESS_TOKEN}"
    auth:
      authenticator: headers_setter

connectors:
  # Routing connector to separate entity events from regular logs
  routing/logs:
    default_pipelines: [logs]
    table:
      - context: log
        condition: instrumentation_scope.attributes["otel.entity.event_as_log"] == true
        pipelines: [logs/entities]

service:
  extensions: [headers_setter, health_check, http_forwarder, http_forwarder/signalfx, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp, zipkin]
      processors:
      - memory_limiter
      - batch
      #- resource/add_environment
      exporters: [otlphttp]
    metrics:
      receivers: [otlp]
      processors: [memory_limiter, batch]
      exporters: [signalfx]
    metrics/internal:
      receivers: [prometheus/internal]
      processors: [memory_limiter, batch, resourcedetection/internal, resource/add_mode]
      exporters: [signalfx/internal]
    logs:
      receivers: [routing/logs]
      processors: [memory_limiter, batch]
      exporters: [splunk_hec, splunk_hec/profiling]
    logs/entities:
      receivers: [routing/logs]
      processors: [memory_limiter, batch]
      exporters: [otlphttp/entities]
    logs/split:
      receivers: [otlp]
      exporters: [routing/logs]
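# ---------------------------------------------------------------------------
# Usage sketch (comments only, not part of the configuration): the environment
# variables referenced above must be defined before the collector starts. The
# values, binary name, and config path below are illustrative assumptions;
# adjust them to match your installation of the Splunk distribution of the
# OpenTelemetry Collector.
#
#   export SPLUNK_ACCESS_TOKEN="<your-org-access-token>"
#   export SPLUNK_HEC_TOKEN="<your-hec-token>"
#   export SPLUNK_REALM="us0"                              # e.g. us0, us1, eu0
#   export SPLUNK_INGEST_URL="https://ingest.${SPLUNK_REALM}.signalfx.com"
#   export SPLUNK_HEC_URL="https://ingest.${SPLUNK_REALM}.signalfx.com/v1/log"
#   export SPLUNK_LISTEN_INTERFACE="0.0.0.0"               # listen on all interfaces in gateway mode
#   export SPLUNK_MEMORY_LIMIT_MIB="460"                   # size this from the memory allocated to the collector
#
#   otelcol --config=<path-to-this-file>                   # assumed binary name; substitute your install's path
# ---------------------------------------------------------------------------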