{"$defs":{"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.AttributeMapping":{"additionalProperties":false,"properties":{"key":{"title":"key","type":"string"},"replacement":{"title":"replacement","type":"string"}},"type":"object"},"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.ImpersonateConfig":{"additionalProperties":false,"properties":{"delegates":{"items":{"type":"string"},"title":"delegates","type":"array"},"subject":{"title":"subject","type":"string"},"target_principal":{"title":"target_principal","type":"string"}},"type":"object"},"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.LogConfig":{"additionalProperties":false,"properties":{"GetClientOptions":{"title":"GetClientOptions"},"compression":{"title":"compression","type":"string"},"default_log_name":{"title":"default_log_name","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"grpc_pool_size":{"title":"grpc_pool_size","type":"integer"},"resource_filters":{"items":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.ResourceFilter"},"title":"resource_filters","type":"array"},"service_resource_labels":{"title":"service_resource_labels","type":"boolean"},"use_insecure":{"title":"use_insecure","type":"boolean"}},"type":"object"},"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.MetricConfig":{"additionalProperties":false,"properties":{"ExtraMetrics":{"title":"ExtraMetrics"},"GetClientOptions":{"title":"GetClientOptions"},"GetMetricName":{"title":"GetMetricName"},"MapMonitoredResource":{"title":"MapMonitoredResource"},"compression":{"title":"compression","type":"string"},"create_metric_descriptor_buffer_size":{"title":"create_metric_descriptor_buffer_size","type":"integer"},"create_service_timeseries":{"title":"create_service_timeseries","type":"boolean"},"cumulative_normalization":{"title":"cumulative_normalization","type":"boolean"},"endpoint":{"title":"endpoint","type":"string"},"experimental_wal_config":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.WALConfig","title":"experimental_wal_config"},"grpc_pool_size":{"title":"grpc_pool_size","type":"integer"},"instrumentation_library_labels":{"title":"instrumentation_library_labels","type":"boolean"},"known_domains":{"items":{"type":"string"},"title":"known_domains","type":"array"},"prefix":{"title":"prefix","type":"string"},"resource_filters":{"items":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.ResourceFilter"},"title":"resource_filters","type":"array"},"service_resource_labels":{"title":"service_resource_labels","type":"boolean"},"skip_create_descriptor":{"title":"skip_create_descriptor","type":"boolean"},"sum_of_squared_deviation":{"title":"sum_of_squared_deviation","type":"boolean"},"use_insecure":{"title":"use_insecure","type":"boolean"}},"type":"object"},"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.ResourceFilter":{"additionalProperties":false,"properties":{"prefix":{"title":"prefix","type":"string"},"regex":{"title":"regex","type":"string"}},"type":"object"},"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.TraceConfig":{"additionalProperties":false,"properties":{"GetClientOptions":{"title":"GetClientOptions"},"attribute_mappings":{"items":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.AttributeMappi
ng"},"title":"attribute_mappings","type":"array"},"compression":{"title":"compression","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"grpc_pool_size":{"title":"grpc_pool_size","type":"integer"},"use_insecure":{"title":"use_insecure","type":"boolean"}},"type":"object"},"github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.WALConfig":{"additionalProperties":false,"properties":{"directory":{"title":"directory","type":"string"},"max_backoff":{"title":"max_backoff","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.alibabacloudlogserviceexporter.Config":{"additionalProperties":false,"properties":{"access_key_id":{"description":"AlibabaCloud access key id","title":"access_key_id","type":"string"},"access_key_secret":{"description":"AlibabaCloud access key secret","title":"access_key_secret","type":"string"},"ecs_ram_role":{"description":"Set AlibabaCLoud ECS ram role if you are using ACK","title":"ecs_ram_role","type":"string"},"endpoint":{"description":"LogService's Endpoint, https://www.alibabacloud.com/help/doc-detail/29008.htm\nfor AlibabaCloud Kubernetes(or ECS), set {region-id}-intranet.log.aliyuncs.com, eg cn-hangzhou-intranet.log.aliyuncs.com;\n others set {region-id}.log.aliyuncs.com, eg cn-hangzhou.log.aliyuncs.com","title":"endpoint","type":"string"},"logstore":{"description":"LogService's Logstore Name","title":"logstore","type":"string"},"project":{"description":"LogService's Project Name","title":"project","type":"string"},"token_file_path":{"description":"Set Token File Path if you are using ACK","title":"token_file_path","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awscloudwatchlogsexporter.Config":{"additionalProperties":false,"description":"Config represent a configuration for the CloudWatch logs exporter.","markdownDescription":"# AWS CloudWatch Logs Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: logs |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nAWS CloudWatch Logs Exporter sends logs data to AWS [CloudWatch Logs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html).\nAWS credentials are retrieved from the [default credential chain](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).\nRegion must be configured in the configuration if not set in the default credential chain.\n\nNOTE: OpenTelemetry Logging support is experimental, hence this exporter is subject to change.\n\n## Configuration\n\nThe following settings are required:\n\n- `log_group_name`: The group name of the CloudWatch logs.\n- `log_stream_name`: The stream name of the CloudWatch logs.\n\nThe following settings can be optionally configured:\n\n- `region`: The AWS region where the log stream is in.\n- `endpoint`: The CloudWatch Logs service endpoint which the requests are forwarded to. 
[See the CloudWatch Logs endpoints](https://docs.aws.amazon.com/general/latest/gr/cwl_region.html) for a list.\n- `log_retention`: LogRetention is the option to set the log retention policy for only newly created CloudWatch Log Groups. Defaults to Never Expire if not specified or set to 0. Possible values for retention in days are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653. \n- `tags`: Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at most 50 tags. Input is a string to string map like so: { 'key': 'value' }. Keys must be between 1-128 characters and follow the regex pattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`(alphanumerics, whitespace, and _.:/=+-!). Values must be between 1-256 characters and follow the regex pattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`(alphanumerics, whitespace, and _.:/=+-!). [Link to tagging restrictions](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html#:~:text=Required%3A%20Yes-,tags,-The%20key%2Dvalue)\n- `role_arn`: The AWS IAM role to upload segments to a same/different account\n- `raw_log`: Boolean default false. If you want to export only the log message to cw logs. This is required for emf logs. \n\n### Examples\n\nSimplest configuration:\n\n```yaml\nexporters:\n awscloudwatchlogs:\n log_group_name: \"testing-logs\"\n log_stream_name: \"testing-integrations-stream\"\n```\n\nAll configuration options:\n\n```yaml\nexporters:\n awscloudwatchlogs:\n log_group_name: \"testing-logs\"\n log_stream_name: \"testing-integrations-stream\"\n region: \"us-east-1\"\n role_arn: \"arn:aws:iam::123456789:role/monitoring-application-logs\"\n endpoint: \"logs.us-east-1.amazonaws.com\"\n log_retention: 365\n tags: { 'sampleKey': 'sampleValue'}\n sending_queue:\n queue_size: 50\n retry_on_failure:\n enabled: true\n initial_interval: 10ms\n```","properties":{"endpoint":{"description":"X-Ray service endpoint to which the collector sends segment documents.","title":"endpoint","type":"string"},"local_mode":{"description":"Local mode to skip EC2 instance metadata check.","title":"local_mode","type":"boolean"},"log_group_name":{"description":"LogGroupName is the name of CloudWatch log group which defines group of log streams\nthat share the same retention, monitoring, and access control settings.","title":"log_group_name","type":"string"},"log_retention":{"description":"LogRetention is the option to set the log retention policy for the CloudWatch Log Group. 
Defaults to Never Expire if not specified or set to 0\nPossible values are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653","title":"log_retention","type":"integer"},"log_stream_name":{"description":"LogStreamName is the name of CloudWatch log stream which is a sequence of log events\nthat share the same source.","title":"log_stream_name","type":"string"},"max_retries":{"description":"Maximum number of retries before abandoning an attempt to post data.","title":"max_retries","type":"integer"},"no_verify_ssl":{"description":"Enable or disable TLS certificate verification.","title":"no_verify_ssl","type":"boolean"},"num_workers":{"description":"Maximum number of concurrent calls to AWS X-Ray to upload documents.","title":"num_workers","type":"integer"},"proxy_address":{"description":"Upload segments to AWS X-Ray through a proxy.","title":"proxy_address","type":"string"},"raw_log":{"description":"Export raw log string instead of log wrapper\nRequired for emf logs","title":"raw_log","type":"boolean"},"region":{"description":"Send segments to AWS X-Ray service in a specific region.","title":"region","type":"string"},"request_timeout_seconds":{"description":"Number of seconds before timing out a request.","title":"request_timeout_seconds","type":"integer"},"resource_arn":{"description":"Amazon Resource Name (ARN) of the AWS resource running the collector.","title":"resource_arn","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"role_arn":{"description":"IAM role to upload segments to a different account.","title":"role_arn","type":"string"},"sending_queue":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awscloudwatchlogsexporter.QueueSettings","description":"QueueSettings is a subset of exporterhelper.QueueSettings,\nbecause only QueueSize is user-settable due to how AWS CloudWatch API works","title":"sending_queue"},"tags":{"description":"Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at least 1 and at most 50 tags. Input is a string to string map like so: { 'key': 'value' }\nKeys must be between 1-128 characters and follow the regex pattern: ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$\nValues must be between 1-256 characters and follow the regex pattern: ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$","patternProperties":{".*":{"type":"string"}},"title":"tags","type":"object"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awscloudwatchlogsexporter.QueueSettings":{"additionalProperties":false,"properties":{"queue_size":{"description":"QueueSize sets the length of the sending queue","title":"queue_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for AWS EMF exporter.","properties":{"detailed_metrics":{"description":"DetailedMetrics is an option for retaining detailed datapoint values in exported metrics (e.g. instead of exporting a quantile as a statistical value,\npreserve the quantile's population)","title":"detailed_metrics","type":"boolean"},"dimension_rollup_option":{"description":"DimensionRollupOption is the option for metrics dimension rollup. 
Three options are available; the default option is \"ZeroAndSingleDimensionRollup\".\n\"ZeroAndSingleDimensionRollup\" - Enable both zero dimension rollup and single dimension rollup\n\"SingleDimensionRollupOnly\" - Enable single dimension rollup\n\"NoDimensionRollup\" - No dimension rollup (only keep original metrics which contain all dimensions)","title":"dimension_rollup_option","type":"string"},"eks_fargate_container_insights_enabled":{"description":"EKSFargateContainerInsightsEnabled is an option to reformat certain metric labels so that they take the form of a high level object\nThe end result will make the labels look like those coming out of ECS and be more easily injected into CloudWatch\nNote that at the moment, in order to use this feature, the value \"kubernetes\" must also be added to the ParseJSONEncodedAttributeValues array","title":"eks_fargate_container_insights_enabled","type":"boolean"},"endpoint":{"description":"X-Ray service endpoint to which the collector sends segment documents.","title":"endpoint","type":"string"},"local_mode":{"description":"Local mode to skip EC2 instance metadata check.","title":"local_mode","type":"boolean"},"log_group_name":{"description":"LogGroupName is the name of CloudWatch log group which defines group of log streams\nthat share the same retention, monitoring, and access control settings.","title":"log_group_name","type":"string"},"log_retention":{"description":"LogRetention is the option to set the log retention policy for the CloudWatch Log Group. Defaults to Never Expire if not specified or set to 0\nPossible values are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653","title":"log_retention","type":"integer"},"log_stream_name":{"description":"LogStreamName is the name of CloudWatch log stream which is a sequence of log events\nthat share the same source.","title":"log_stream_name","type":"string"},"max_retries":{"description":"Maximum number of retries before abandoning an attempt to post data.","title":"max_retries","type":"integer"},"metric_declarations":{"description":"MetricDeclarations is the list of rules to be used to set dimensions for exported metrics.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.MetricDeclaration"},"title":"metric_declarations","type":"array"},"metric_descriptors":{"description":"MetricDescriptors is the list of override metric descriptors that are sent to the CloudWatch","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.MetricDescriptor"},"title":"metric_descriptors","type":"array"},"namespace":{"description":"Namespace is a container for CloudWatch metrics.\nMetrics in different namespaces are isolated from each other.","title":"namespace","type":"string"},"no_verify_ssl":{"description":"Enable or disable TLS certificate verification.","title":"no_verify_ssl","type":"boolean"},"num_workers":{"description":"Maximum number of concurrent calls to AWS X-Ray to upload documents.","title":"num_workers","type":"integer"},"output_destination":{"description":"OutputDestination is an option to specify the EMFExporter output. 
Default option is \"cloudwatch\"\n\"cloudwatch\" - direct the exporter output to CloudWatch backend\n\"stdout\" - direct the exporter output to stdout\nTODO: we can support directing output to a file (in the future) while customer specifies a file path here.","title":"output_destination","type":"string"},"parse_json_encoded_attr_values":{"description":"ParseJSONEncodedAttributeValues is an array of attribute keys whose corresponding values are JSON-encoded as strings.\nThose strings will be decoded to their original JSON structure.","items":{"type":"string"},"title":"parse_json_encoded_attr_values","type":"array"},"proxy_address":{"description":"Upload segments to AWS X-Ray through a proxy.","title":"proxy_address","type":"string"},"region":{"description":"Send segments to AWS X-Ray service in a specific region.","title":"region","type":"string"},"request_timeout_seconds":{"description":"Number of seconds before timing out a request.","title":"request_timeout_seconds","type":"integer"},"resource_arn":{"description":"Amazon Resource Name (ARN) of the AWS resource running the collector.","title":"resource_arn","type":"string"},"resource_to_telemetry_conversion":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.resourcetotelemetry.Settings","description":"ResourceToTelemetrySettings is an option for converting resource attributes to telemetry attributes.\n\"Enabled\" - A boolean field to enable/disable this option. Default is `false`.\nIf enabled, all the resource attributes will be converted to metric labels by default.","title":"resource_to_telemetry_conversion"},"retain_initial_value_of_delta_metric":{"description":"RetainInitialValueOfDeltaMetric is the flag to signal that the initial value of a metric is a valid datapoint.\nThe default behavior is that the first value occurrence of a metric is set as the baseline for the calculation of\nthe delta to the next occurrence. With this flag set to true the exporter will instead use this first value as the\ninitial delta value. This is especially useful when handling low frequency metrics.","title":"retain_initial_value_of_delta_metric","type":"boolean"},"role_arn":{"description":"IAM role to upload segments to a different account.","title":"role_arn","type":"string"},"tags":{"description":"Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at most 50 tags. Input is a string to string map like so: { 'key': 'value' }\nKeys must be between 1-128 characters and follow the regex pattern: ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$\nValues must be between 1-256 characters and follow the regex pattern: ^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$","patternProperties":{".*":{"type":"string"}},"title":"tags","type":"object"},"version":{"description":"Version is an option for sending metrics to CloudWatchLogs with Embedded Metric Format in selected version (with \"_aws\")\nhttps://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html#CloudWatch_Embedded_Metric_Format_Specification_structure\nOtherwise, metrics are sent as Embedded Metric Format version 0 (without \"_aws\")","title":"version","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.LabelMatcher":{"additionalProperties":false,"description":"LabelMatcher defines a label filtering rule against the labels of incoming metrics.","properties":{"label_names":{"description":"List of label names to filter by. 
Their corresponding values are concatenated using\nthe separator and matched against the specified regular expression.","items":{"type":"string"},"title":"label_names","type":"array"},"regex":{"description":"Regex string to be used to match against values of the concatenated labels.","title":"regex","type":"string"},"separator":{"description":"(Optional) Separator placed between concatenated source label values. (Default: ';')","title":"separator","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.MetricDeclaration":{"additionalProperties":false,"description":"MetricDeclaration characterizes a rule to be used to set dimensions for certain incoming metrics, filtered by their metric names.","markdownDescription":"# AWS CloudWatch EMF Exporter for OpenTelemetry Collector\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [aws], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter converts OpenTelemetry metrics to \n[AWS CloudWatch Embedded Metric Format(EMF)](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html)\nand then sends them directly to CloudWatch Logs using the \n[PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) API.\n\n## Data Conversion\nConvert OpenTelemetry ```Int64DataPoints```, ```DoubleDataPoints```, ```SummaryDataPoints``` metrics datapoints into CloudWatch ```EMF``` structured log formats and send it to CloudWatch. Logs and Metrics will be displayed in CloudWatch console.\n\n## Exporter Configuration\n\nThe following exporter configuration parameters are supported.\n\n| Name | Description | Default |\n|:---------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ------- |\n| `log_group_name` | Customized log group name which supports `{ClusterName}` and `{TaskId}` placeholders. One valid example is `/aws/metrics/{ClusterName}`. It will search for `ClusterName` (or `aws.ecs.cluster.name`) resource attribute in the metrics data and replace with the actual cluster name. If none of them are found in the resource attribute map, `{ClusterName}` will be replaced by `undefined`. 
Similar way, for the `{TaskId}`, it searches for `TaskId` (or `aws.ecs.task.id`) key in the resource attribute map. For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`) |\"/metrics/default\"|\n| `log_stream_name` | Customized log stream name which supports `{TaskId}`, `{ClusterName}`, `{NodeName}`, `{ContainerInstanceId}`, and `{TaskDefinitionFamily}` placeholders. One valid example is `{TaskId}`. It will search for `TaskId` (or `aws.ecs.task.id`) resource attribute in the metrics data and replace with the actual task id. If none of them are found in the resource attribute map, `{TaskId}` will be replaced by `undefined`. Similarly, for the `{TaskDefinitionFamily}`, it searches for `TaskDefinitionFamily` (or `aws.ecs.task.family`). For the `{ClusterName}`, it searches for `ClusterName` (or `aws.ecs.cluster.name`). For `{NodeName}`, it searches for `NodeName` (or `k8s.node.name`). For `{ContainerInstanceId}`, it searches for `ContainerInstanceId` (or `aws.ecs.container.instance.id`). (Note: ContainerInstanceId (or `aws.ecs.container.instance.id`) only works for AWS ECS EC2 launch type. |\"otel-stream\"|\n| `log_retention` | LogRetention is the option to set the log retention policy for only newly created CloudWatch Log Groups. Defaults to Never Expire if not specified or set to 0. Possible values for retention in days are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, or 3653. |\"Never Expire\"|\n| `tags` | Tags is the option to set tags for the CloudWatch Log Group. If specified, please add at most 50 tags. Input is a string to string map like so: { 'key': 'value' }. Keys must be between 1-128 characters and follow the regex pattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`(alphanumerics, whitespace, and _.:/=+-!). Values must be between 1-256 characters and follow the regex pattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`(alphanumerics, whitespace, and _.:/=+-!). [Link to tagging restrictions](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html#:~:text=Required%3A%20Yes-,tags,-The%20key%2Dvalue) | No tags set |\n| `namespace` | Customized CloudWatch metrics namespace | \"default\" |\n| `endpoint` | Optionally override the default CloudWatch service endpoint. | |\n| `no_verify_ssl` | Enable or disable TLS certificate verification. | false |\n| `proxy_address` | Upload Structured Logs to AWS CloudWatch through a proxy. | |\n| `region` | Send Structured Logs to AWS CloudWatch in a specific region. If this field is not present in config, environment variable \"AWS_REGION\" can then be used to set region. | determined by metadata |\n| `role_arn` | IAM role to upload segments to a different account. | |\n| `max_retries` | Maximum number of retries before abandoning an attempt to post data. | 1 |\n| `dimension_rollup_option` | DimensionRollupOption is the option for metrics dimension rollup. Three options are available: `NoDimensionRollup`, `SingleDimensionRollupOnly` and `ZeroAndSingleDimensionRollup` |\"ZeroAndSingleDimensionRollup\" (Enable both zero dimension rollup and single dimension rollup)| \n| `resource_to_telemetry_conversion` | \"resource_to_telemetry_conversion\" is the option for converting resource attributes to telemetry attributes. It has only one config onption- `enabled`. For metrics, if `enabled=true`, all the resource attributes will be converted to metric labels by default. See `Resource Attributes to Metric Labels` section below for examples. 
| `enabled=false` | \n| `output_destination` | \"output_destination\" is an option to specify the EMFExporter output. Currently, two options are available: \"cloudwatch\" or \"stdout\" | `cloudwatch` | \n| `detailed_metrics` | Retain detailed datapoint values in exported metrics (e.g. instead of exporting a quantile as a statistical value, preserve the quantile's population) | `false` | \n| `parse_json_encoded_attr_values` | List of attribute keys whose corresponding values are JSON-encoded strings and will be converted to JSON structures in emf logs. For example, the attribute string value \"{\\\\\"x\\\\\":5,\\\\\"y\\\\\":6}\" will be converted to a JSON object: ```{\"x\": 5, \"y\": 6}``` | [ ] | \n| [`metric_declarations`](#metric_declaration) | List of rules for filtering exported metrics and their dimensions. | [ ] |\n| [`metric_descriptors`](#metric_descriptor) | List of rules for inserting or updating metric descriptors. | [ ] |\n| `retain_initial_value_of_delta_metric` | This option specifies how the first value of a metric is handled. AWS EMF expects metric values to only contain deltas to the previous value. In the default case the first received value is therefore not sent to AWS but only used as a baseline for follow up changes to this metric. This is fine for high throughput metrics with stable labels (e.g. `requests{code=200}`). In this case it does not matter if the first value of this metric is discarded. However, when your metric describes infrequent events or events with high label cardinality, then the exporter in default configuration would still drop the first occurrence of this metric. With this configuration value set to `true` the first value of all metrics will instead be sent to AWS. | false |\n\n### metric_declaration\nA metric_declaration section characterizes a rule to be used to set dimensions for exported metrics, filtered by the incoming metrics' labels and metric names.\n\n| Name | Description | Default |\n| :---------------- |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ------- |\n| `dimensions` | List of dimension sets to be exported. Dimension sets that include dimensions that are not labels are ignored. Use empty dimension set `[]` for metrics without labels. | [[ ]] |\n| `metric_name_selectors` | List of regex strings to filter metric names by. | |\n| [`label_matchers`](#label_matcher) | (Optional) list of label matching rules to filter metrics by their labels. This rule is applied to any metric that matches any of the label matchers. | [ ] |\n\n#### label_matcher\nA label_matcher section defines a matching rule against the labels of the incoming metric. Only metrics that match the rules will be used by the surrounding `metric_declaration`.\n\n| Name | Description | Default |\n| :---------------- | :--------------------------------------------------------------------- | ------- |\n| `label_names` | List of label names to filter by. Their corresponding values are concatenated using the separator and matched against the configured regular expression. | |\n| `separator` | (Optional) separator placed between concatenated label values. | \";\" |\n| `regex` | Regex string to be matched against concatenated label values. | |\n\n### metric_descriptor\nA metric descriptor section allows the schema of a metric to be overwritten before sending out to the CloudWatch backend service. 
Currently, we only support unit override.\n\n| Name | Description | Default |\n| :---------------- | :--------------------------------------------------------------------- | ------- |\n| `metric_name` | The name of the metric to be overwritten. | |\n| `unit` | The overwritten value of unit. The [MetricDatum](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) contains a full list of supported unit values. | |\n| `overwrite` | `true` if the schema should be overwritten with the given specification, otherwise it will only be configured if empty. | false |\n\n\n## AWS Credential Configuration\n\nThis exporter follows default credential resolution for the \n[aws-sdk-go](https://docs.aws.amazon.com/sdk-for-go/api/index.html).\n\nFollow the [guidelines](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) for the \ncredential configuration.\n\n\n## Configuration Examples\n\n\n### Resource Attributes to Metric Labels\nThe `resource_to_telemetry_conversion` option can be enabled to convert all the resource attributes to metric labels. By default, this option is disabled. Users need to set `enabled=true` to opt in. See the config example below.\n\n```yaml\nexporters:\n awsemf:\n region: 'us-west-2'\n resource_to_telemetry_conversion:\n enabled: true\n```\n\n### Metric Declaration\n\nThe following is an example of how to use `metric_declaration` to select what metrics should be exported.\n\n```yaml\nexporters:\n awsemf:\n region: 'us-west-2'\n output_destination: stdout\n dimension_rollup_option: \"NoDimensionRollup\"\n metric_declarations:\n - dimensions: [[]]\n metric_name_selectors:\n # Metric without label\n - \"^node_load15$\"\n - dimensions: [[device, fstype], []]\n metric_name_selectors:\n - \"^node_filesystem_readonly$\"\n```","properties":{"dimensions":{"description":"Dimensions is a list of dimension sets (which are lists of dimension names) to be\nincluded in exported metrics. 
If the metric does not contain any of the specified\ndimensions, the metric would be dropped (will only show up in logs).","items":{"items":{"type":"string"},"type":"array"},"title":"dimensions","type":"array"},"label_matchers":{"description":"(Optional) List of label matchers that define matching rules to filter against\nthe labels of incoming metrics.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.LabelMatcher"},"title":"label_matchers","type":"array"},"metric_name_selectors":{"description":"MetricNameSelectors is a list of regex strings to be matched against metric names\nto determine which metrics should be included with this metric declaration rule.","items":{"type":"string"},"title":"metric_name_selectors","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.MetricDescriptor":{"additionalProperties":false,"properties":{"metric_name":{"description":"MetricName is the name of the metric","title":"metric_name","type":"string"},"overwrite":{"description":"Overwrite set to true means the existing metric descriptor will be overwritten or a new metric descriptor will be created; false means\nthe descriptor will only be configured if empty.","title":"overwrite","type":"boolean"},"unit":{"description":"Unit defines the override value of metric descriptor `unit`","title":"unit","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awskinesisexporter.AWSConfig":{"additionalProperties":false,"description":"AWSConfig contains AWS specific configuration such as awskinesis stream, region, etc.","markdownDescription":"# Kinesis Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe kinesis exporter currently exports dynamic encodings to the configured kinesis stream.\nThe exporter relies heavily on the kinesis.PutRecords API to reduce network I/O and reduces records into the smallest atomic representation\nto avoid hitting the hard limits placed on Records (no greater than 1MB).\nThis producer will block until the operation is done to allow for retryable and queued data to help during high loads.\n\nThe following settings are required:\n- `aws`\n - `stream_name` (no default): The name of the Kinesis stream to export to.\n\nThe following settings can be optionally configured:\n- `aws`\n - `kinesis_endpoint` (no default)\n - `region` (default = us-west-2): the region that the kinesis stream is deployed in\n - `role` (no default): The role to be used in order to send data to the kinesis stream\n- `encoding`\n - `name` (default = otlp): defines the export type to be used to send to kinesis (available are `otlp_proto`, `otlp_json`, `zipkin_proto`, `zipkin_json`, `jaeger_proto`)\n - **Note** : `otlp_json` is considered experimental and _should not_ be used for production environments. 
\n - `compression` (default = none): allows to set the compression type (defaults BestSpeed for all) before forwarding to kinesis (available is `flate`, `gzip`, `zlib` or `none`)\n- `max_records_per_batch` (default = 500, PutRecords limit): The number of records that can be batched together then sent to kinesis.\n- `max_record_size` (default = 1Mb, PutRecord(s) limit on record size): The max allowed size that can be exported to kinesis\n- `timeout` (default = 5s): Is the timeout for every attempt to send data to the backend.\n- `retry_on_failure`\n - `enabled` (default = true)\n - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`\n - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 120s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`\n- `sending_queue`\n - `enabled` (default = true)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`;\n User should calculate this as `num_seconds * requests_per_second` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per seconds.\n\nExample Configuration:\n\n```yaml\nexporters:\n awskinesis:\n aws:\n stream_name: raw-trace-stream\n region: us-east-1\n role: arn:test-role\n```","properties":{"kinesis_endpoint":{"title":"kinesis_endpoint","type":"string"},"region":{"title":"region","type":"string"},"role":{"title":"role","type":"string"},"stream_name":{"title":"stream_name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awskinesisexporter.Config":{"additionalProperties":false,"description":"Config contains the main configuration options for the awskinesis exporter","properties":{"aws":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awskinesisexporter.AWSConfig","title":"aws"},"encoding":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awskinesisexporter.Encoding","title":"encoding"},"max_record_size":{"title":"max_record_size","type":"integer"},"max_records_per_batch":{"title":"max_records_per_batch","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awskinesisexporter.Encoding":{"additionalProperties":false,"properties":{"compression":{"title":"compression","type":"string"},"name":{"title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awss3exporter.Config":{"additionalProperties":false,"description":"Config contains the main configuration options for the s3 
exporter","properties":{"file_format":{"title":"file_format","type":"string"},"marshaler":{"title":"marshaler","type":"string"},"s3uploader":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awss3exporter.S3UploaderConfig","title":"s3uploader"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awss3exporter.S3UploaderConfig":{"additionalProperties":false,"description":"S3UploaderConfig contains aws s3 uploader related config to controls things like bucket, prefix, batching, connections, retries, etc.","markdownDescription":"# AWS S3 Exporter for OpenTelemetry Collector\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, metrics, logs |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Schema supported\nThis exporter targets to support proto/json format.\n\n## Exporter Configuration\n\nThe following exporter configuration parameters are supported. \n\n| Name | Description | Default |\n|:---------------|:------------------------------------------------------|----------|\n| `region` | AWS region. | |\n| `s3_bucket` | S3 bucket | |\n| `s3_prefix` | prefix for the S3 key (root directory inside bucket). | |\n| `s3_partition` | time granularity of S3 key: hour or minute | \"minute\" |\n| `file_prefix` | file prefix defined by user | |\n| `marshaler` | marshaler used to produce output data otlp_json | |\n\n# Example Configuration\n\nFollowing example configuration defines to store output in 'eu-central' region and bucket named 'databucket'.\n\n```yaml\nexporters:\n awss3:\n s3uploader:\n region: 'eu-central-1'\n s3_bucket: 'databucket'\n s3_prefix: 'metric'\n s3_partition: 'minute'\n```\n\nLogs and traces will be stored inside 'databucket' in the following path format.\n\n```console\nmetric/year=XXXX/month=XX/day=XX/hour=XX/minute=XX\n```\n\n## AWS Credential Configuration\n\nThis exporter follows default credential resolution for the\n[aws-sdk-go](https://docs.aws.amazon.com/sdk-for-go/api/index.html).\n\nFollow the [guidelines](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) for the\ncredential configuration.","properties":{"file_prefix":{"title":"file_prefix","type":"string"},"region":{"title":"region","type":"string"},"s3_bucket":{"title":"s3_bucket","type":"string"},"s3_partition":{"title":"s3_partition","type":"string"},"s3_prefix":{"title":"s3_prefix","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsxrayexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for AWS X-Ray exporter.","markdownDescription":"# AWS X-Ray Tracing Exporter for OpenTelemetry Collector\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [aws], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: 
https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter converts OpenTelemetry spans to\n[AWS X-Ray Segment Documents](https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html)\nand then sends them directly to X-Ray using the\n[PutTraceSegments](https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html) API.\n\n## Data Conversion\n\nTrace IDs and Span IDs are expected to be originally generated by either AWS API Gateway or AWS ALB and\npropagated by them using the `X-Amzn-Trace-Id` HTTP header. However, other generation sources are\nsupported by replacing fully-random Trace IDs with X-Ray formatted Trace IDs where necessary:\n\n\u003e AWS X-Ray IDs are the same size as W3C Trace Context IDs but differ in that the first 32 bits of a Trace ID\n\u003e is the Unix epoch time when the trace was started. Since X-Ray only allows submission of Trace IDs from the\n\u003e past 30 days, received Trace IDs are checked and spans without a valid timestamp are dropped.\n\nThis means in order for spans to appear in X-Ray, the client SDK MUST use an X-Ray ID generator. For more\ninformation, see\n[configuring the X-Ray exporter](https://aws-otel.github.io/docs/getting-started/x-ray#configuring-the-aws-x-ray-exporter).\n\nThe `http` object is populated when the `component` attribute value is `grpc` as well as `http`. Other\nsynchronous call types should also result in the `http` object being populated.\n\n## AWS Specific Attributes\n\nThe following AWS-specific Span attributes are supported in addition to the standard names and values\ndefined in the OpenTelemetry Semantic Conventions.\n\n| Attribute name | Notes and examples | Required? |\n| :--------------- | :--------------------------------------------------------------------- | --------- |\n| `aws.operation` | The name of the API action invoked against an AWS service or resource. | No |\n| `aws.account_id` | The AWS account number if accessing resource in different account. | No |\n| `aws.region` | The AWS region if accessing resource in different region from app. | No |\n| `aws.request_id` | AWS-generated unique identifier for the request. | No |\n| `aws.queue_url` | For operations on an Amazon SQS queue, the queue's URL. | No |\n| `aws.table_name` | For operations on a DynamoDB table, the name of the table. | No |\n\nAny of these values supplied are used to populate the `aws` object in addition to any relevant data supplied\nby the Span Resource object. X-Ray uses this data to generate inferred segments for the remote APIs.\n\n## Exporter Configuration\n\nThe following exporter configuration parameters are supported. They mirror and have the same effect as the\ncomparable AWS X-Ray Daemon configuration values.\n\n| Name | Description | Default |\n|:-----------------------------|:-------------------------------------------------------------------------------------------------------------------| ------- |\n| `num_workers` | Maximum number of concurrent calls to AWS X-Ray to upload documents. | 8 |\n| `endpoint` | Optionally override the default X-Ray service endpoint. | |\n| `request_timeout_seconds` | Number of seconds before timing out a request. | 30 |\n| `max_retries` | Maximum number of attempts to post a batch before failing. | 2 |\n| `no_verify_ssl` | Enable or disable TLS certificate verification. | false |\n| `proxy_address` | Upload segments to AWS X-Ray through a proxy. 
| |\n| `region` | Send segments to AWS X-Ray service in a specific region. | |\n| `local_mode` | Local mode to skip EC2 instance metadata check. | false |\n| `resource_arn` | Amazon Resource Name (ARN) of the AWS resource running the collector. | |\n| `role_arn` | IAM role to upload segments to a different account. | |\n| `indexed_attributes` | List of attribute names to be converted to X-Ray annotations. | |\n| `index_all_attributes` | Enable or disable conversion of all OpenTelemetry attributes to X-Ray annotations. | false |\n| `aws_log_groups` | List of log group names for CloudWatch. | [] |\n| `telemetry.enabled` | Whether telemetry collection is enabled at all. | false |\n| `telemetry.include_metadata` | Whether to include metadata in the telemetry (InstanceID, Hostname, ResourceARN) | false |\n| `telemetry.contributors` | List of X-Ray component IDs contributing to the telemetry (ex. for multiple X-Ray receivers: awsxray/1, awsxray/2) | |\n| `telemetry.hostname` | Sets the Hostname included in the telemetry. | |\n| `telemetry.instance_id` | Sets the InstanceID included in the telemetry. | |\n| `telemetry.resource_arn` | Sets the Amazon Resource Name (ARN) included in the telemetry. | |\n\n## Traces and logs correlation\n\nAWS X-Ray can be integrated with CloudWatch Logs to correlate traces with logs. For this integration to work, the X-Ray\nsegments must have the AWS Property `cloudwatch_logs` set. This property is set using the AWS X-Ray exporter with the\nfollowing values that are evaluated in this order:\n\n1. `aws.log.group.arns` resource attribute.\n2. `aws.log.group.names` resource attribute.\n3. `aws_log_groups` configuration property.\n\nIn the case of multiple values are defined, the value with higher precedence will be used to set the `cloudwatch_logs` AWS Property.\n\n`aws.log.group.arns` and `aws.log.group.names` are slice resource attributes that can be set programmatically.\nAlternatively those resource attributes can be set using the [`OTEL_RESOURCE_ATTRIBUTES` environment variable](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable). 
In this case only a single log group/log group arn can\nbe provided as a string rather than a slice.\n\n## AWS Credential Configuration\n\nThis exporter follows default credential resolution for the\n[aws-sdk-go](https://docs.aws.amazon.com/sdk-for-go/api/index.html).\n\nFollow the [guidelines](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) for the\ncredential configuration.\n\n[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[AWS]:https://aws-otel.github.io/docs/getting-started/x-ray#configuring-the-aws-x-ray-exporter","properties":{"aws_log_groups":{"items":{"type":"string"},"title":"aws_log_groups","type":"array"},"endpoint":{"description":"X-Ray service endpoint to which the collector sends segment documents.","title":"endpoint","type":"string"},"index_all_attributes":{"description":"Set to true to convert all OpenTelemetry attributes to X-Ray annotation (indexed) ignoring the IndexedAttributes option.\nDefault value: false","title":"index_all_attributes","type":"boolean"},"indexed_attributes":{"description":"By default, OpenTelemetry attributes are converted to X-Ray metadata, which are not indexed.\nSpecify a list of attribute names to be converted to X-Ray annotations instead, which will be indexed.\nSee annotation vs. metadata: https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-annotations","items":{"type":"string"},"title":"indexed_attributes","type":"array"},"local_mode":{"description":"Local mode to skip EC2 instance metadata check.","title":"local_mode","type":"boolean"},"max_retries":{"description":"Maximum number of retries before abandoning an attempt to post data.","title":"max_retries","type":"integer"},"no_verify_ssl":{"description":"Enable or disable TLS certificate verification.","title":"no_verify_ssl","type":"boolean"},"num_workers":{"description":"Maximum number of concurrent calls to AWS X-Ray to upload documents.","title":"num_workers","type":"integer"},"proxy_address":{"description":"Upload segments to AWS X-Ray through a proxy.","title":"proxy_address","type":"string"},"region":{"description":"Send segments to AWS X-Ray service in a specific region.","title":"region","type":"string"},"request_timeout_seconds":{"description":"Number of seconds before timing out a request.","title":"request_timeout_seconds","type":"integer"},"resource_arn":{"description":"Amazon Resource Name (ARN) of the AWS resource running the collector.","title":"resource_arn","type":"string"},"role_arn":{"description":"IAM role to upload segments to a different account.","title":"role_arn","type":"string"},"telemetry":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.aws.xray.telemetry.Config","description":"TelemetryConfig contains the options for telemetry collection.","title":"telemetry"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.azuredataexplorerexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Azure Data Explorer 
Exporter","properties":{"application_id":{"title":"application_id","type":"string"},"application_key":{"title":"application_key","type":"string"},"cluster_uri":{"title":"cluster_uri","type":"string"},"db_name":{"title":"db_name","type":"string"},"ingestion_type":{"title":"ingestion_type","type":"string"},"logs_table_json_mapping":{"title":"logs_table_json_mapping","type":"string"},"logs_table_name":{"title":"logs_table_name","type":"string"},"metrics_table_json_mapping":{"title":"metrics_table_json_mapping","type":"string"},"metrics_table_name":{"title":"metrics_table_name","type":"string"},"tenant_id":{"title":"tenant_id","type":"string"},"traces_table_json_mapping":{"title":"traces_table_json_mapping","type":"string"},"traces_table_name":{"title":"traces_table_name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.azuremonitorexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Azure Monitor","properties":{"endpoint":{"title":"endpoint","type":"string"},"instrumentation_key":{"title":"instrumentation_key","type":"string"},"maxbatchinterval":{"title":"maxbatchinterval","type":"string"},"maxbatchsize":{"title":"maxbatchsize","type":"integer"},"spaneventsenabled":{"title":"spaneventsenabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.carbonexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Carbon exporter.","markdownDescription":"# Carbon Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe [Carbon](https://github.com/graphite-project/carbon) exporter supports\nCarbon's [plaintext\nprotocol](https://graphite.readthedocs.io/en/stable/feeding-carbon.html#the-plaintext-protocol).\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `localhost:2003`): Address and port that the\n exporter should send data to.\n- `timeout` (default = `5s`): Maximum duration allowed to connect\n and send data to the configured `endpoint`.\n\nExample:\n\n```yaml\nexporters:\n carbon:\n # by default it will export to localhost:2003 using tcp\n carbon/allsettings:\n # use endpoint to specify alternative destinations for the exporter,\n # the default is localhost:2003\n endpoint: localhost:8080\n # timeout is the maximum duration allowed to connecting and sending the\n # data to the configured endpoint.\n # The default is 5 seconds.\n timeout: 10s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"endpoint":{"description":"Endpoint specifies host and port to send metrics in the Carbon plaintext\nformat. 
The default value is defined by the DefaultEndpoint constant.","title":"endpoint","type":"string"},"timeout":{"description":"Timeout is the maximum duration allowed to connecting and sending the\ndata to the Carbon/Graphite backend.\nThe default value is defined by the DefaultSendTimeout constant.","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.cassandraexporter.Compression":{"additionalProperties":false,"properties":{"algorithm":{"title":"algorithm","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.cassandraexporter.Config":{"additionalProperties":false,"markdownDescription":"# Cassandra Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, logs |\n| Distributions | [contrib] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\n## Configuration options\n\nThe following settings can be optionally configured:\n\n- `dsn` The Cassandra server DSN (Data Source Name), for example `127.0.0.1`.\n reference: [https://pkg.go.dev/github.com/gocql/gocql](https://pkg.go.dev/github.com/gocql/gocql)\n- `keyspace` (default = otel): The keyspace name.\n- `trace_table` (default = otel_spans): The table name for traces.\n- `replication` (default = class: SimpleStrategy, replication_factor: 1) The strategy of\n replication. https://cassandra.apache.org/doc/4.1/cassandra/architecture/dynamo.html#replication-strategy\n- `compression` (default = LZ4Compressor) https://cassandra.apache.org/doc/latest/cassandra/operating/compression.html\n\n## Example\n\n```yaml\nexporters:\n cassandra:\n dsn: 127.0.0.1\n keyspace: \"otel\"\n trace_table: \"otel_spans\"\n replication:\n class: \"SimpleStrategy\"\n replication_factor: 1\n compression:\n algorithm: \"ZstdCompressor\"\n```","properties":{"compression":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.cassandraexporter.Compression","title":"compression"},"dsn":{"title":"dsn","type":"string"},"keyspace":{"title":"keyspace","type":"string"},"logs_table":{"title":"logs_table","type":"string"},"replication":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.cassandraexporter.Replication","title":"replication"},"trace_table":{"title":"trace_table","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.cassandraexporter.Replication":{"additionalProperties":false,"properties":{"class":{"title":"class","type":"string"},"replication_factor":{"title":"replication_factor","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.clickhouseexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Elastic exporter.","markdownDescription":"# ClickHouse Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, metrics, logs |\n| Distributions | [contrib] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\n\nThis exporter 
supports sending OpenTelemetry data to [ClickHouse](https://clickhouse.com/). \n\u003e ClickHouse is an open-source, high performance columnar OLAP database management system for real-time analytics using\n\u003e SQL.\n\u003e Throughput can be measured in rows per second or megabytes per second.\n\u003e If the data is placed in the page cache, a query that is not too complex is processed on modern hardware at a speed of\n\u003e approximately 2-10 GB/s of uncompressed data on a single server.\n\u003e If 10 bytes of columns are extracted, the speed is expected to be around 100-200 million rows per second.\n\nNote:\nAlways add the [batch-processor](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor) to the\ncollector pipeline,\nas the [ClickHouse documentation says:](https://clickhouse.com/docs/en/introduction/performance/#performance-when-inserting-data)\n\u003e We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When\n\u003e inserting to a MergeTree table from a tab-separated dump, the insertion speed can be from 50 to 200 MB/s.\n\n## Use Cases\n\n1. Use the [Grafana Clickhouse datasource](https://grafana.com/grafana/plugins/grafana-clickhouse-datasource/) or\n [vertamedia-clickhouse-datasource](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource/) to build\n dashboards.\n Time-series graphs, tables and logs are supported.\n\n2. Analyze logs with powerful ClickHouse SQL.\n\n### Logs\n\n- Get a time series of log counts by severity.\n\n```clickhouse\nSELECT toDateTime(toStartOfInterval(Timestamp, INTERVAL 60 second)) as time, SeverityText, count() as count\nFROM otel_logs\nWHERE time \u003e= NOW() - INTERVAL 1 HOUR\nGROUP BY SeverityText, time\nORDER BY time;\n```\n\n- Find recent logs.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find logs from a specific service.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE ServiceName = 'clickhouse-exporter'\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find logs with a specific attribute.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE LogAttributes['container_name'] = '/example_flog_1'\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find logs whose body contains a string token.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE hasToken(Body, 'http')\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find logs whose body contains a string.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE Body like '%http%'\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find logs whose body matches a regular expression.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE match(Body, 'http')\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find logs by extracting a JSON value from the body.\n\n```clickhouse\nSELECT Timestamp as log_time, Body\nFROM otel_logs\nWHERE JSONExtractFloat(Body, 'bytes') \u003e 1000\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n### Traces\n\n- Find spans with a specific attribute.\n\n```clickhouse\nSELECT Timestamp as log_time,\n TraceId,\n SpanId,\n ParentSpanId,\n SpanName,\n SpanKind,\n ServiceName,\n Duration,\n StatusCode,\n StatusMessage,\n toString(SpanAttributes),\n toString(ResourceAttributes),\n toString(Events.Name),\n toString(Links.TraceId)\nFROM 
otel_traces\nWHERE ServiceName = 'clickhouse-exporter'\n AND SpanAttributes['peer.service'] = 'tracegen-server'\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find traces with traceID (using time primary index and TraceID skip index).\n\n```clickhouse\nWITH\n '391dae938234560b16bb63f51501cb6f' as trace_id,\n (SELECT min(Start) FROM otel_traces_trace_id_ts WHERE TraceId = trace_id) as start,\n (SELECT max(End) + 1 FROM otel_traces_trace_id_ts WHERE TraceId = trace_id) as end\nSELECT Timestamp as log_time,\n TraceId,\n SpanId,\n ParentSpanId,\n SpanName,\n SpanKind,\n ServiceName,\n Duration,\n StatusCode,\n StatusMessage,\n toString(SpanAttributes),\n toString(ResourceAttributes),\n toString(Events.Name),\n toString(Links.TraceId)\nFROM otel_traces\nWHERE TraceId = trace_id\n AND Timestamp \u003e= start\n AND Timestamp \u003c= end\nLimit 100;\n```\n\n- Find spans is error.\n\n```clickhouse\nSELECT Timestamp as log_time,\n TraceId,\n SpanId,\n ParentSpanId,\n SpanName,\n SpanKind,\n ServiceName,\n Duration,\n StatusCode,\n StatusMessage,\n toString(SpanAttributes),\n toString(ResourceAttributes),\n toString(Events.Name),\n toString(Links.TraceId)\nFROM otel_traces\nWHERE ServiceName = 'clickhouse-exporter'\n AND StatusCode = 'STATUS_CODE_ERROR'\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n- Find slow spans.\n\n```clickhouse\nSELECT Timestamp as log_time,\n TraceId,\n SpanId,\n ParentSpanId,\n SpanName,\n SpanKind,\n ServiceName,\n Duration,\n StatusCode,\n StatusMessage,\n toString(SpanAttributes),\n toString(ResourceAttributes),\n toString(Events.Name),\n toString(Links.TraceId)\nFROM otel_traces\nWHERE ServiceName = 'clickhouse-exporter'\n AND Duration \u003e 1 * 1e9\n AND Timestamp \u003e= NOW() - INTERVAL 1 HOUR\nLimit 100;\n```\n\n### Metrics\n\nMetrics data is stored in different clickhouse tables depending on their types. The tables will have a suffix to\ndistinguish which type of metrics data is stored.\n\n| Metrics Type | Metrics Table |\n| --------------------- | ---------------------- |\n| sum | _sum |\n| gauge | _gauge |\n| histogram | _histogram |\n| exponential histogram | _exponential_histogram |\n| summary | _summary |\n\nBefore you make a metrics query, you need to know the type of metric you wish to use. 
If your metrics come from\nPrometheus (or another source that uses the OpenMetrics protocol), you also need to know the\n[compatibility](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md#prometheus-and-openmetrics-compatibility)\nbetween Prometheus (OpenMetrics) and OTLP Metrics.\n\n- Find a sum metric by name.\n```clickhouse\nselect TimeUnix,MetricName,Attributes,Value from otel_metrics_sum\nwhere MetricName='calls_total' limit 100\n```\n\n- Find a sum metric by name and attribute.\n```clickhouse\nselect TimeUnix,MetricName,Attributes,Value from otel_metrics_sum\nwhere MetricName='calls_total' and Attributes['service_name']='featureflagservice'\nlimit 100\n```\n\nOTLP Metrics [define two value types for one data point](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L358);\nClickHouse uses only one float64 value to store them.\n\n## Performance Guide\n\nA single ClickHouse instance with 32 CPU cores and 128 GB RAM can handle around 20 TB (20 billion) log records per day.\nThe data compression ratio is 7 ~ 11, so the compressed data stored on disk is 1.8 TB ~ 2.85 TB.\nAdding more ClickHouse nodes to the cluster scales throughput roughly linearly.\n\nA collector with an `otlp receiver/batch processor/clickhouse tcp exporter` pipeline can process\naround 40k log entries per second per CPU core. Adding more collector nodes scales throughput roughly linearly.\n\n## Configuration options\n\nThe following settings are required:\n\n- `endpoint` (no default): The ClickHouse server address; multiple hosts with ports are supported, for example:\n - tcp protocol `tcp://addr1:port,tcp://addr2:port` or TLS `tcp://addr1:port,addr2:port?secure=true`\n - http protocol `http://addr1:port,addr2:port` or https `https://addr1:port,addr2:port`\n - clickhouse protocol `clickhouse://addr1:port,addr2:port` or TLS `clickhouse://addr1:port,addr2:port?secure=true`\n\nMany other ClickHouse-specific options can be configured through query parameters, e.g. `addr?dial_timeout=5s\u0026compress=lz4`. For a full list of options, see the [ClickHouse driver documentation](https://github.com/ClickHouse/clickhouse-go/blob/b2f9409ba1c7bb239a4f6553a6da347f3f5f1330/clickhouse_options.go#L174).\n\nConnection options:\n\n- `username` (default = ): The authentication username.\n- `password` (default = ): The authentication password.\n- `ttl_days` (default = 0): The data time-to-live in days; 0 means no TTL.\n- `database` (default = otel): The database name.\n- `connection_params` (default = {}): Extra connection parameters, specified as a map.\n\nClickHouse tables:\n\n- `logs_table_name` (default = otel_logs): The table name for logs.\n- `traces_table_name` (default = otel_traces): The table name for traces.\n- `metrics_table_name` (default = otel_metrics): The table name for metrics.\n\nProcessing:\n\n- `timeout` (default = 5s): The timeout for every attempt to send data to the backend.\n- `sending_queue`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping data.\n- `retry_on_failure`\n - `enabled` (default = true)\n - `initial_interval` (default = 5s): The time to wait after the first failure before retrying; ignored if `enabled`\n is `false`\n - `max_interval` (default = 30s): The upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 300s): The maximum amount of time spent trying to send a batch; ignored if `enabled`\n is `false`\n\n## TLS\n\nThe exporter supports TLS. 
To enable TLS, you need to specify the `secure=true` query parameter in the `endpoint` URL or\nuse the `https` scheme.\n\n## Example\n\nThis example shows how to configure the exporter to send data to a ClickHouse server.\nIt uses the native protocol without TLS. The exporter will create the database and tables if they don't exist.\nThe data is stored for 3 days.\n\n```yaml\nreceivers:\n examplereceiver:\nprocessors:\n batch:\n timeout: 5s\n send_batch_size: 100000\nexporters:\n clickhouse:\n endpoint: tcp://127.0.0.1:9000?dial_timeout=10s\u0026compress=lz4\n database: otel\n ttl_days: 3\n logs_table_name: otel_logs\n traces_table_name: otel_traces\n metrics_table_name: otel_metrics\n timeout: 5s\n retry_on_failure:\n enabled: true\n initial_interval: 5s\n max_interval: 30s\n max_elapsed_time: 300s\nservice:\n pipelines:\n logs:\n receivers: [ examplereceiver ]\n processors: [ batch ]\n exporters: [ clickhouse ]\n```","properties":{"connection_params":{"description":"ConnectionParams holds extra connection parameters in map format, for example compression or dial_timeout.","patternProperties":{".*":{"type":"string"}},"title":"connection_params","type":"object"},"database":{"description":"Database is the database name to export.","title":"database","type":"string"},"endpoint":{"description":"Endpoint is the ClickHouse endpoint.","title":"endpoint","type":"string"},"logs_table_name":{"description":"LogsTableName is the table name for logs. default is `otel_logs`.","title":"logs_table_name","type":"string"},"metrics_table_name":{"description":"MetricsTableName is the table name for metrics. default is `otel_metrics`.","title":"metrics_table_name","type":"string"},"password":{"description":"Password is the authentication password.","title":"password","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.clickhouseexporter.QueueSettings","description":"QueueSettings is a subset of exporterhelper.QueueSettings,\nbecause only QueueSize is user-settable.","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"traces_table_name":{"description":"TracesTableName is the table name for traces. 
default is `otel_traces`.","title":"traces_table_name","type":"string"},"ttl_days":{"description":"TTLDays is The data time-to-live in days, 0 means no ttl.","title":"ttl_days","type":"integer"},"username":{"description":"Username is the authentication username.","title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.clickhouseexporter.QueueSettings":{"additionalProperties":false,"description":"QueueSettings is a subset of exporterhelper.QueueSettings.","properties":{"queue_size":{"description":"QueueSize set the length of the sending queue","title":"queue_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.coralogixexporter.Config":{"additionalProperties":false,"description":"Config defines by Coralogix.","markdownDescription":"# Coralogix Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| | [beta]: traces, metrics |\n| Distributions | [contrib], [observiq] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Coralogix exporter sends traces, metrics and logs to [Coralogix](https://coralogix.com/).\n\n\u003e Please review the Collector's [security\n\u003e documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md),\n\u003e which contains recommendations on securing sensitive information such as the\n\u003e API key required by this exporter.\n\n## Configuration\n\nExample configuration:\n```yaml\nexporters:\n coralogix:\n # The Coralogix domain\n domain: \"coralogix.com\"\n # Your Coralogix private key is sensitive\n private_key: \"xxx\"\n\n # (Optional) Ordered list of Resource attributes that are used for Coralogix\n # AppName and SubSystem values. The first non-empty Resource attribute is used.\n # Example: application_name_attributes: [\"k8s.namespace.name\", \"service.namespace\"]\n # Example: subsystem_name_attributes: [\"k8s.deployment.name\", \"k8s.daemonset.name\", \"service.name\"]\n application_name_attributes:\n - \"service.namespace\"\n subsystem_name_attributes:\n - \"service.name\"\n\n # Traces, Metrics and Logs emitted by this OpenTelemetry exporter \n # are tagged in Coralogix with the default application and subsystem constants.\n application_name: \"MyBusinessEnvironment\"\n subsystem_name: \"MyBusinessSystem\"\n\n # (Optional) Timeout is the timeout for every attempt to send data to the backend.\n timeout: 30s\n```\n\n### v0.76.0 Coralogix Domain \n\nSince v0.76.0 you can specify Coralogix domain in the configuration file instead of specifying different endpoints for traces, metrics and logs. For example, the configuration below, can be replaced with domain field:\n\nOld configuration:\n```yaml\nexporters:\n coralogix:\n traces:\n endpoint: \"ingress.coralogix.com:443\"\n metrics:\n endpoint: \"ingress.coralogix.com:443\"\n logs:\n endpoint: \"ingress.coralogix.com:443\"\n```\n\nNew configuration with domain field:\n```yaml\nexporters:\n coralogix:\n domain: \"coralogix.com\"\n```\n\n### Coralogix's Domain \n\nDepending on your region, you might need to use a different domain. 
Here are the available domains:\n\n| Region | Domain |\n|---------|---------------------------------|\n| USA1 | `coralogix.us` |\n| APAC1 | `coralogix.in` |\n| APAC2 | `coralogixsg.com` |\n| EUROPE1 | `coralogix.com` |\n| EUROPE2 | `eu2.coralogix.com` |\n\nAdditionally, Coralogix supports AWS PrivateLink, which provides private connectivity between virtual private clouds (VPCs), supported AWS services, and your on-premises networks without exposing your traffic to the public internet.\n\nHere are available AWS PrivateLink domains:\n\n| Region | Domain |\n|---------|-----------------------------|\n| USA1 | `private.coralogix.com` |\n| APAC1 | `private.coralogix.in` |\n| APAC2 | `private.coralogixsg.com` |\n| EUROPE1 | `private.coralogix.com` |\n| EUROPE2 | `private.eu2.coralogix.com` |\n\nLearn more about [AWS PrivateLink in the documentation page](https://coralogix.com/docs/coralogix-amazon-web-services-aws-privatelink-endpoints/).\n\n### Application and SubSystem attributes\n\nv0.62.0 release of OpenTelemetry Collector allows you to map Application name and Subsystem name to Resource attributes. \nYou need to set `application_name_attributes` and `subsystem_name_attributes` fields with a list of potential Resource attributes for the AppName and Subsystem values. The first not-empty Resource attribute is going to be used.\n\n### Kubernetes attributes\n\nWhen using OpenTelemetry Collector with [k8sattribute](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/k8sattributesprocessor) processor, you can use attributes coming from Kubernetes, such as `k8s.namespace.name` or `k8s.deployment.name`. The following example shows recommended list of attributes:\n\n```yaml\nexporters:\n coralogix:\n domain: \"coralogix.com\"\n application_name_attributes:\n - \"service.namespace\"\n - \"k8s.namespace.name\" \n subsystem_name_attributes:\n - \"service.name\"\n - \"k8s.deployment.name\"\n - \"k8s.statefulset.name\"\n - \"k8s.daemonset.name\"\n - \"k8s.cronjob.name\"\n - \"k8s.job.name\"\n - \"k8s.container.name\"\n```\n### Host Attributes\n\nOpenTelemetry Collector [resourcedetection](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor) processor can discover Host Resource attributes, such as `host.name` and provide Resource attributes using environment variables, which can be used for setting AppName and SubSystem fields in Coralogix.\n\nExample: \n```yaml\nprocessors:\n resourcedetection/system:\n detectors: [\"system\", \"env\"]\n system:\n hostname_sources: [\"os\"]\n```\n\nAnd setting environment variable such as:\n```\nOTEL_RESOURCE_ATTRIBUTES=\"env=production\"\n```\n\nYou can configure Coralogix Exporter:\n\n```yaml\nexporters:\n coralogix:\n domain: \"coralogix.com\"\n application_name_attributes:\n - \"env\" \n subsystem_name_attributes:\n - \"host.name\"\n```\n### EC2 Attributes\n\nOpenTelemetry Collector [resourcedetection](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor) processor can discover EC2 Resource attributes, such as EC2 tags as resource attributes.\n\nExample: \n```yaml\nprocessors:\n resourcedetection/ec2:\n detectors: [\"ec2\"]\n ec2:\n # A list of regex's to match tag keys to add as resource attributes can be specified\n tags:\n - ^ec2.tag.name$\n - ^ec2.tag.subsystem$\n```\n\n**_NOTE:_** In order to fetch EC2 tags, the IAM role assigned to the EC2 instance must have a policy that includes the `ec2:DescribeTags` 
permission.\n\n```json\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"VisualEditor0\",\n \"Effect\": \"Allow\",\n \"Action\": \"ec2:DescribeTags\",\n \"Resource\": \"*\"\n }\n ]\n}\n```\n\nYou can configure Coralogix Exporter:\n\n```yaml\nexporters:\n coralogix:\n domain: \"coralogix.com\"\n application_name_attributes:\n - \"ec2.tag.name\" \n subsystem_name_attributes:\n - \"ec2.tag.subsystem\"\n```\n\n### Custom Attributes\n\nYou can combine and create custom Resource attributes using [transform](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor) processor. For example:\n```yaml\n processors:\n transform:\n error_mode: ignore\n log_statements:\n - context: resource\n statements:\n - set(attributes[\"applicationName\"], Concat([\"development-environment\", attributes[\"k8s.namespace.name\"]], \"-\"))\n```\n\nThen you can use the custom Resource attribute in Coralogix exporter:\n```yaml\nexporters:\n coralogix:\n domain: \"coralogix.com\"\n application_name_attributes:\n - \"applicationName\" \n subsystem_name_attributes:\n - \"host.name\"\n```\n\n### Exporting to multiple teams based on attributes\nYou can export the signals based on your business logic (attributes) to different Coralogix teams. To achieve this, you'll need to use the [`filter`](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md) processor and setup one pipeline per team. You can setup your `filter` processors as following (example with metrics):\n```\nprocessors: \n filter/teamA:\n metrics:\n datapoint:\n - 'attributes[\"your_label\"] != \"teamA\"'\n filter/teamB:\n metrics:\n datapoint:\n - 'attributes[\"your_label\"] != \"teamB\"'\n```\n\nThis configuration ensures separate processor per each team. Any data points without an attribute for a particular team will be dropped from exporting. \n\nSecondly, set up an individual exporter per each team:\n```\nexporters:\n coralogix/teamA:\n metrics:\n endpoint: \"otel-metrics.coralogix.com:443\"\n private_key: \u003cprivate_key_for_teamA\u003e\n application_name: \"MyBusinessEnvironment\"\n subsystem_name: \"MyBusinessSystem\"\n coralogix/teamB:\n metrics:\n endpoint: \"otel-metrics.coralogix.com:443\"\n private_key: \u003cprivate_key_for_teamB\u003e\n application_name: \"MyBusinessEnvironment\"\n subsystem_name: \"MyBusinessSystem\"\n```\n\nFinally, join each processor and exporter (and any other components you wish) in the pipelines. Here is an example with a Prometheus receiver:\n```\nservice:\n pipelines:\n metrics/1:\n receivers: [prometheus]\n processors: [filter/teamA]\n exporters: [coralogix/teamA]\n metrics/2:\n receivers: [prometheus]\n processors: [filter/teamB]\n exporters: [coralogix/teamB]\n```\n\n### Need help?\n\nOur world-class customer success team is available 24/7 to walk you through the setup for this exporter and answer any questions that may come up.\nFeel free to reach out to us **via our in-app chat** or by sending us an email to [support@coralogix.com](mailto:support@coralogix.com).","properties":{"application_name":{"description":"Default Coralogix application and subsystem name values.","title":"application_name","type":"string"},"application_name_attributes":{"description":"Ordered list of Resource attributes that are used for Coralogix\nAppName and SubSystem values. 
The first non-empty Resource attribute is used.\nExample: AppNameAttributes: [\"k8s.namespace.name\", \"service.namespace\"]\nExample: SubSystemAttributes: [\"k8s.deployment.name\", \"k8s.daemonset.name\", \"service.name\"]","items":{"type":"string"},"title":"application_name_attributes","type":"array"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"domain":{"description":"Coralogix domain","title":"domain","type":"string"},"domain_settings":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCClientSettings","description":"GRPC Settings used with Domain","title":"domain_settings"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"logs":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCClientSettings","description":"The Coralogix logs ingress endpoint","title":"logs"},"metrics":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCClientSettings","description":"The Coralogix metrics ingress endpoint","title":"metrics"},"private_key":{"description":"Your Coralogix private key (sensitive) for authentication","title":"private_key","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. 
See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"subsystem_name":{"title":"subsystem_name","type":"string"},"subsystem_name_attributes":{"items":{"type":"string"},"title":"subsystem_name_attributes","type":"array"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"traces":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCClientSettings","description":"Coralogix traces ingress endpoint","title":"traces"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC gRPC. See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.APIConfig":{"additionalProperties":false,"description":"APIConfig defines the API configuration options","markdownDescription":"# Datadog Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| | [beta]: traces, metrics |\n| Distributions | [contrib], [aws], [observiq] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\u003e Please review the Collector's [security documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md), which contains recommendations on securing sensitive information such as the API key required by this exporter.\n\nVisit the [official documentation](https://docs.datadoghq.com/tracing/trace_collection/open_standards/otel_collector_datadog_exporter/) for usage instructions.\n\n## FAQs\n\n### Why am I getting errors 413 - Request Entity Too Large, how do I fix it?\n\nThis error indicates the payload size sent by the Datadog exporter exceeds the size limit (see previous examples https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/16834, https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/17566).\n\nThis is usually caused by the pipeline batching too many telemetry data before sending to the Datadog exporter. To fix that, try lowering `send_batch_size` and `send_batch_max_size` in your batchprocessor config. 
You might want to have a separate batch processor dedicated for datadog exporter if other exporters expect a larger batch size, e.g.\n```\nprocessors:\n batch: # To be used by other exporters\n timeout: 1s\n # Default value for send_batch_size is 8192\n batch/datadog:\n send_batch_max_size: 100\n send_batch_size: 10\n timeout: 10s\n...\nservice:\n pipelines:\n metrics:\n receivers: ...\n processors: [batch/datadog]\n exporters: [datadog]\n```\n\nThe exact values for `send_batch_size` and `send_batch_max_size` depends on your specific workload. Also note that, Datadog intake has different payload size limits for the 3 signal types:\n- Trace intake: 3.2MB\n- Log intake: https://docs.datadoghq.com/api/latest/logs/\n- Metrics V2 intake: https://docs.datadoghq.com/api/latest/metrics/#submit-metrics\n\n\n[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta\n[alpha]:https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[AWS]:https://aws-otel.github.io/docs/partners/datadog","properties":{"fail_on_invalid_key":{"description":"FailOnInvalidKey states whether to exit at startup on invalid API key.\nThe default value is false.","title":"fail_on_invalid_key","type":"boolean"},"key":{"description":"Key is the Datadog API key to associate your Agent's data with your organization.\nCreate a new API key here: https://app.datadoghq.com/account/settings","title":"key","type":"string"},"site":{"description":"Site is the site of the Datadog intake to send data to.\nThe default value is \"datadoghq.com\".","title":"site","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for the Datadog exporter.","properties":{"api":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.APIConfig","description":"API defines the Datadog API configuration.","title":"api"},"host_metadata":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.HostMetadataConfig","description":"HostMetadata defines the host metadata specific configuration","title":"host_metadata"},"hostname":{"description":"Hostname is the host name for unified service tagging.\nIf unset, it is determined automatically.","title":"hostname","type":"string"},"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.LogsConfig","description":"Logs defines the Logs exporter specific configuration","title":"logs"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.MetricsConfig","description":"Metrics defines the Metrics exporter specific configuration","title":"metrics"},"only_metadata":{"description":"OnlyMetadata defines whether to only send metadata\nThis is useful for agent-collector setups, so that\nmetadata about a host is sent to the backend even\nwhen telemetry data is reported via a different host.\n\nThis flag is incompatible with disabling host metadata,\n`use_resource_metadata`, or `host_metadata::hostname_source != 
first_resource`","title":"only_metadata","type":"boolean"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.LimitedTLSClientSettings","title":"tls"},"traces":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.TracesConfig","description":"Traces defines the Traces exporter specific configuration","title":"traces"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.HistogramConfig":{"additionalProperties":false,"description":"HistogramConfig customizes export of OTLP Histograms.","properties":{"mode":{"description":"Mode for exporting histograms. Valid values are 'distributions', 'counters' or 'nobuckets'.\n - 'distributions' sends histograms as Datadog distributions (recommended).\n - 'counters' sends histograms as Datadog counts, one metric per bucket.\n - 'nobuckets' sends no bucket histogram metrics. Aggregation metrics will still be sent\n if `send_aggregation_metrics` is enabled.\n\nThe current default is 'distributions'.","title":"mode","type":"string"},"send_aggregation_metrics":{"description":"SendAggregations states if the exporter should send .sum, .count, .min and .max metrics for histograms.\nThe default is false.","title":"send_aggregation_metrics","type":"boolean"},"send_count_sum_metrics":{"description":"SendCountSum states if the export should send .sum and .count metrics for histograms.\nThe default is false.\nDeprecated: [v0.75.0] Use `send_aggregation_metrics` (HistogramConfig.SendAggregations) instead.","title":"send_count_sum_metrics","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.HostMetadataConfig":{"additionalProperties":false,"description":"HostMetadataConfig defines the host metadata related configuration.","properties":{"enabled":{"description":"Enabled enables the host metadata functionality.","title":"enabled","type":"boolean"},"hostname_source":{"description":"HostnameSource is the source for the hostname of host metadata.\nValid values are 'first_resource' and 'config_or_system':\n- 'first_resource' picks the host metadata hostname from the resource\n attributes on the first OTLP payload that gets to the exporter.\n If the first payload lacks hostname-like attributes, it will fallback to 'config_or_system'.\n Do not use this hostname source if receiving data from multiple hosts.\n- 'config_or_system' picks the host metadata hostname from the 'hostname' setting,\n If this is empty it will use available system APIs and cloud provider endpoints.\n\nThe default is 'config_or_system'.","title":"hostname_source","type":"string"},"tags":{"description":"Tags is a list of host tags.\nThese tags will be attached to telemetry signals that have the host metadata hostname.\nTo attach tags to telemetry signals regardless of the host, use a processor 
instead.","items":{"type":"string"},"title":"tags","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.LimitedTLSClientSettings":{"additionalProperties":false,"description":"LimitedTLSClientSetting is a subset of TLSClientSetting, see LimitedHTTPClientSettings for more details","properties":{"insecure_skip_verify":{"description":"InsecureSkipVerify controls whether a client verifies the server's\ncertificate chain and host name.","title":"insecure_skip_verify","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.LogsConfig":{"additionalProperties":false,"description":"LogsConfig defines logs exporter specific configuration","properties":{"dump_payloads":{"description":"DumpPayloads report whether payloads should be dumped when logging level is debug.","title":"dump_payloads","type":"boolean"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig defines the metrics exporter specific configuration options","properties":{"delta_ttl":{"description":"DeltaTTL defines the time that previous points of a cumulative monotonic\nmetric are kept in memory to calculate deltas","title":"delta_ttl","type":"integer"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". 
The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"histograms":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.HistogramConfig","description":"HistConfig defines the export of OTLP Histograms.","title":"histograms"},"instrumentation_scope_metadata_as_tags":{"description":"InstrumentationScopeMetadataAsTags, if set to true, adds the name and version of the\ninstrumentation scope that created a metric to the metric tags","title":"instrumentation_scope_metadata_as_tags","type":"boolean"},"resource_attributes_as_tags":{"description":"ResourceAttributesAsTags, if set to true, will use the exporterhelper feature to transform all\nresource attributes into metric labels, which are then converted into tags","title":"resource_attributes_as_tags","type":"boolean"},"summaries":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.SummaryConfig","description":"SummaryConfig defines the export for OTLP Summaries.","title":"summaries"},"sums":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.SumConfig","description":"SumConfig defines the export of OTLP Sums.","title":"sums"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.SumConfig":{"additionalProperties":false,"description":"SumConfig customizes export of OTLP Sums.","properties":{"cumulative_monotonic_mode":{"description":"CumulativeMonotonicMode is the mode for exporting OTLP Cumulative Monotonic Sums.\nValid values are 'to_delta' or 'raw_value'.\n - 'to_delta' calculates delta for cumulative monotonic sums and sends it as a Datadog count.\n - 'raw_value' sends the raw value of cumulative monotonic sums as Datadog gauges.\n\nThe default is 'to_delta'.\nSee https://docs.datadoghq.com/metrics/otlp/?tab=sum#mapping for details and examples.","title":"cumulative_monotonic_mode","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.SummaryConfig":{"additionalProperties":false,"description":"SummaryConfig customizes export of OTLP Summaries.","properties":{"mode":{"description":"Mode is the the mode for exporting OTLP Summaries.\nValid values are 'noquantiles' or 'gauges'.\n - 'noquantiles' sends no `.quantile` metrics. `.sum` and `.count` metrics will still be sent.\n - 'gauges' sends `.quantile` metrics as gauges tagged by the quantile.\n\nThe default is 'gauges'.\nSee https://docs.datadoghq.com/metrics/otlp/?tab=summary#mapping for details and examples.","title":"mode","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.TracesConfig":{"additionalProperties":false,"description":"TracesConfig defines the traces exporter specific configuration options","properties":{"compute_stats_by_span_kind":{"description":"If set to true, enables an additional stats computation check on spans to see they have an eligible `span.kind` (server, consumer, client, producer).\nIf enabled, a span with an eligible `span.kind` will have stats computed. 
If disabled, only top-level and measured spans will have stats computed.\nNOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off.","title":"compute_stats_by_span_kind","type":"boolean"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"ignore_resources":{"description":"ignored resources\nA blacklist of regular expressions can be provided to disable certain traces based on their resource name\nall entries must be surrounded by double quotes and separated by commas.\nignore_resources: [\"(GET|POST) /healthcheck\"]","items":{"type":"string"},"title":"ignore_resources","type":"array"},"peer_service_aggregation":{"description":"If set to true, enables `peer.service` aggregation in the exporter. If disabled, aggregated trace stats will not include `peer.service` as a dimension.\nFor the best experience with `peer.service`, it is recommended to also enable `compute_stats_by_span_kind`.\nIf enabling both causes the datadog exporter to consume too many resources, try disabling `compute_stats_by_span_kind` first.\nIf the overhead remains high, it will be due to a high cardinality of `peer.service` values from the traces. You may need to check your instrumentation.","title":"peer_service_aggregation","type":"boolean"},"span_name_as_resource_name":{"description":"If set to true the OpenTelemetry span name will used in the Datadog resource name.\nIf set to false the resource name will be filled with the instrumentation library name + span kind.\nThe default value is `false`.","title":"span_name_as_resource_name","type":"boolean"},"span_name_remappings":{"description":"SpanNameRemappings is the map of datadog span names and preferred name to map to. This can be used to\nautomatically map Datadog Span Operation Names to an updated value. 
All entries should be key/value pairs.\nspan_name_remappings:\n io.opentelemetry.javaagent.spring.client: spring.client\n instrumentation:express.server: express\n go.opentelemetry.io_contrib_instrumentation_net_http_otelhttp.client: http.client","patternProperties":{".*":{"type":"string"}},"title":"span_name_remappings","type":"object"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.BufferSettings":{"additionalProperties":false,"properties":{"group_by":{"items":{"type":"string"},"title":"group_by","type":"array"},"max_lifetime":{"title":"max_lifetime","type":"string"},"retry_initial_interval":{"title":"retry_initial_interval","type":"string"},"retry_max_elapsed_time":{"title":"retry_max_elapsed_time","type":"string"},"retry_max_interval":{"title":"retry_max_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.Config":{"additionalProperties":false,"properties":{"api_key":{"title":"api_key","type":"string"},"buffer":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.BufferSettings","title":"buffer"},"dataset_url":{"title":"dataset_url","type":"string"},"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.LogsSettings","title":"logs"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.TimeoutSettings","title":"timeout"},"traces":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.TracesSettings","title":"traces"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.LogsSettings":{"additionalProperties":false,"properties":{"export_resource_info_on_event":{"description":"ExportResourceInfo is optional flag to signal that the resource info is being exported to DataSet while exporting Logs.\nThis is especially useful when reducing DataSet billable log volume.\nDefault value: false.","title":"export_resource_info_on_event","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.TracesSettings":{"additionalProperties":false,"properties":{"aggregate":{"title":"aggregate","type":"boolean"},"max_wait":{"title":"max_wait","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.dynatraceexporter.config.Config":{"additionalProperties":false,"description":"Config defines configuration for the Dynatrace exporter.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"api_token":{"description":"Dynatrace API token with metrics ingest permission","title":"api_token","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"default_dimensions":{"description":"DefaultDimensions will be added to all exported 
metrics","patternProperties":{".*":{"type":"string"}},"title":"default_dimensions","type":"object"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"prefix":{"description":"String to prefix all metric names","title":"prefix","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_to_telemetry_conversion":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.resourcetotelemetry.Settings","title":"resource_to_telemetry_conversion"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"tags":{"description":"Tags will be added to all exported metrics\nDeprecated: Please use DefaultDimensions instead","items":{"type":"string"},"title":"tags","type":"array"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Elastic exporter.","markdownDescription":"# Elasticsearch Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, logs |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending OpenTelemetry logs to [Elasticsearch](https://www.elastic.co/elasticsearch).\n\n## Configuration options\n\n- `endpoints`: List of Elasticsearch URLs. If endpoints and cloudid is missing, the\n ELASTICSEARCH_URL environment variable will be used.\n- `cloudid` (optional):\n [ID](https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html) of the\n Elastic Cloud Cluster to publish events to. The `cloudid` can be used instead\n of `endpoints`.\n- `num_workers` (optional): Number of workers publishing bulk requests concurrently.\n- `index`: The\n [index](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html)\n or [datastream](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html)\n name to publish events to. The default value is `logs-generic-default`. Note: To better differentiate between log indexes and traces indexes, `index` option are deprecated and replaced with below `logs_index`\n- `logs_index`: The\n [index](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html)\n or [datastream](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html)\n name to publish events to. The default value is `logs-generic-default`\n- `logs_dynamic_index` (optional): \n takes resource or log record attribute named `elasticsearch.index.prefix` and `elasticsearch.index.suffix`\n resulting dynamically prefixed / suffixed indexing based on `logs_index`. (priority: resource attribute \u003e log record attribute)\n - `enabled`(default=false): Enable/Disable dynamic index for log records\n- `traces_index`: The\n [index](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html)\n or [datastream](https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html)\n name to publish traces to. The default value is `traces-generic-default`.\n- `traces_dynamic_index` (optional):\n takes resource or span attribute named `elasticsearch.index.prefix` and `elasticsearch.index.suffix`\n resulting dynamically prefixed / suffixed indexing based on `traces_index`. (priority: resource attribute \u003e span attribute)\n - `enabled`(default=false): Enable/Disable dynamic index for trace spans\n- `pipeline` (optional): Optional [Ingest Node](https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html)\n pipeline ID used for processing documents published by the exporter.\n- `flush`: Event bulk buffer flush settings\n - `bytes` (default=5242880): Write buffer flush limit.\n - `interval` (default=30s): Write buffer time limit.\n- `retry`: Event retry settings\n - `enabled` (default=true): Enable/Disable event retry on error. 
Retry\n support is enabled by default.\n - `max_requests` (default=3): Number of HTTP request retries.\n - `initial_interval` (default=100ms): Initial waiting time if an HTTP request failed.\n - `max_interval` (default=1m): Max waiting time if an HTTP request failed.\n- `mapping`: Events are encoded to JSON. The `mapping` allows users to\n configure additional mapping rules.\n - `mode` (default=ecs): The field naming mode. Valid modes are:\n - `none`: Use original fields and event structure from the OTLP event.\n - `ecs`: Try to map fields defined in the\n [OpenTelemetry Semantic Conventions](https://github.com/open-telemetry/semantic-conventions)\n to [Elastic Common Schema (ECS)](https://www.elastic.co/guide/en/ecs/current/index.html).\n - `fields` (optional): Configure additional field mappings.\n - `file` (optional): Read additional field mappings from the provided YAML file.\n - `dedup` (default=true): Try to find and remove duplicate fields/attributes\n from events before publishing to Elasticsearch. Some structured logging\n libraries can produce duplicate fields (for example zap). Elasticsearch\n will reject documents that have duplicate fields.\n - `dedot` (default=true): When enabled, attributes with `.` will be split into\n proper JSON objects.\n- `sending_queue`\n - `enabled` (default = false)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`\n\n### HTTP settings\n\n- `read_buffer_size` (default=0): Read buffer size.\n- `write_buffer_size` (default=0): Write buffer size.\n- `timeout` (default=90s): HTTP request time limit.\n- `headers` (optional): Headers to be sent with each HTTP request.\n\n### Security and Authentication settings\n\n- `user` (optional): Username used for HTTP Basic Authentication.\n- `password` (optional): Password used for HTTP Basic Authentication.\n- `api_key` (optional): Authorization [API Key](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html).\n\n### TLS settings\n\n- `ca_file` (optional): Root Certificate Authority (CA) certificate, for\n verifying the server's identity, if TLS is enabled.\n- `cert_file` (optional): Client TLS certificate.\n- `key_file` (optional): Client TLS key.\n- `insecure` (optional): In gRPC, when set to true, this is used to disable the client transport security. In HTTP, this disables verifying the server's certificate chain and host name.\n- `insecure_skip_verify` (optional): Will enable TLS but not verify the certificate.\n\n### Node Discovery\n\nThe Elasticsearch Exporter will check Elasticsearch regularly for available\nnodes and update the list of hosts if discovery is enabled. 
Newly discovered\nnodes will automatically be used for load balancing.\n\n- `discover`:\n - `on_start` (optional): If enabled the exporter queries Elasticsearch\n for all known nodes in the cluster on startup.\n - `interval` (optional): Interval to update the list of Elasticsearch nodes.\n\n## Example\n\n```yaml\nexporters:\n elasticsearch/trace:\n endpoints: [https://elastic.example.com:9200]\n traces_index: trace_index\n elasticsearch/log:\n endpoints: [http://localhost:9200]\n logs_index: my_log_index\n sending_queue:\n enabled: true\n num_consumers: 20\n queue_size: 1000\n······\nservice:\n pipelines:\n logs:\n receivers: [otlp]\n processors: [batch]\n exporters: [elasticsearch/log]\n traces:\n receivers: [otlp]\n exporters: [elasticsearch/trace]\n processors: [batch]\n```","properties":{"api_key":{"description":"APIKey is used to configure ApiKey based Authentication.\n\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html","title":"api_key","type":"string"},"cloudid":{"description":"CloudID holds the cloud ID to identify the Elastic Cloud cluster to send events to.\nhttps://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html\n\nThis setting is required if no URL is configured.","title":"cloudid","type":"string"},"discover":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.DiscoverySettings","title":"discover"},"endpoints":{"description":"Endpoints holds the Elasticsearch URLs the exporter should send events to.\n\nThis setting is required if CloudID is not set and if the\nELASTICSEARCH_URL environment variable is not set.","items":{"type":"string"},"title":"endpoints","type":"array"},"flush":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.FlushSettings","title":"flush"},"headers":{"description":"Headers allows users to configure optional HTTP headers that\nwill be send with each HTTP request.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"index":{"description":"Index configures the index, index alias, or data stream name events should be indexed in.\n\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/indices.html\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html\n\nDeprecated: `index` is deprecated and replaced with `logs_index`.","title":"index","type":"string"},"logs_dynamic_index":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.DynamicIndexSetting","description":"fall back to pure LogsIndex, if 'elasticsearch.index.prefix' or 'elasticsearch.index.suffix' are not found in resource or attribute (prio: resource \u003e attribute)","title":"logs_dynamic_index"},"logs_index":{"description":"This setting is required when logging pipelines used.","title":"logs_index","type":"string"},"mapping":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.MappingsSettings","title":"mapping"},"num_workers":{"description":"NumWorkers configures the number of workers publishing bulk requests.","title":"num_workers","type":"integer"},"password":{"description":"Password is used to configure HTTP Basic Authentication.","title":"password","type":"string"},"pipeline":{"description":"Pipeline configures the ingest node pipeline name that should be used to process 
the\nevents.\n\nhttps://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html","title":"pipeline","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.RetrySettings","title":"retry"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout configures the HTTP request timeout.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"},"traces_dynamic_index":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.DynamicIndexSetting","description":"fall back to pure TracesIndex, if 'elasticsearch.index.prefix' or 'elasticsearch.index.suffix' are not found in resource or attribute (prio: resource \u003e attribute)","title":"traces_dynamic_index"},"traces_index":{"description":"This setting is required when traces pipelines used.","title":"traces_index","type":"string"},"user":{"description":"User is used to configure HTTP Basic Authentication.","title":"user","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.DiscoverySettings":{"additionalProperties":false,"description":"DiscoverySettings defines Elasticsearch node discovery related settings.","properties":{"interval":{"description":"Interval instructs the exporter to renew the list of Elasticsearch URLs\nwith the given interval. 
URLs will not be updated if Interval is \u003c=0.","title":"interval","type":"string"},"on_start":{"description":"OnStart, if set, instructs the exporter to look for available Elasticsearch\nnodes the first time the exporter connects to the cluster.","title":"on_start","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.DynamicIndexSetting":{"additionalProperties":false,"properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.FlushSettings":{"additionalProperties":false,"description":"FlushSettings defines settings for configuring the write buffer flushing policy in the Elasticsearch exporter.","properties":{"bytes":{"description":"Bytes sets the send buffer flushing limit.","title":"bytes","type":"integer"},"interval":{"description":"Interval configures the max age of a document in the send buffer.","title":"interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.MappingsSettings":{"additionalProperties":false,"properties":{"dedot":{"title":"dedot","type":"boolean"},"dedup":{"description":"Try to find and remove duplicate fields","title":"dedup","type":"boolean"},"fields":{"description":"Additional field mappings.","patternProperties":{".*":{"type":"string"}},"title":"fields","type":"object"},"file":{"description":"File to read additional fields mappings from.","title":"file","type":"string"},"mode":{"description":"Mode configures the field mappings.","title":"mode","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.RetrySettings":{"additionalProperties":false,"description":"RetrySettings defines settings for the HTTP request retries in the Elasticsearch exporter.","properties":{"enabled":{"description":"Enabled allows users to disable retry without having to comment out all settings.","title":"enabled","type":"boolean"},"initial_interval":{"description":"InitialInterval configures the initial waiting time if a request failed.","title":"initial_interval","type":"string"},"max_interval":{"description":"MaxInterval configures the max waiting time if consecutive requests failed.","title":"max_interval","type":"string"},"max_requests":{"description":"MaxRequests configures how often an HTTP request is retried before it is assumed to be failed.","title":"max_requests","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.f5cloudexporter.AuthConfig":{"additionalProperties":false,"description":"AuthConfig defines F5 Cloud authentication configurations for F5CloudAuthRoundTripper","properties":{"audience":{"description":"Audience is the F5 Cloud audience for your designated account.","title":"audience","type":"string"},"credential_file":{"description":"CredentialFile is the F5 Cloud credentials for your designated account.","title":"credential_file","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.f5cloudexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for F5 Cloud exporter.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP 
requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"f5cloud_auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.f5cloudexporter.AuthConfig","description":"AuthConfig represents the F5 Cloud authentication configuration options.","title":"f5cloud_auth"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"logs_endpoint":{"description":"The URL to send logs to. If omitted the Endpoint + \"/v1/logs\" will be used.","title":"logs_endpoint","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics_endpoint":{"description":"The URL to send metrics to. If omitted the Endpoint + \"/v1/metrics\" will be used.","title":"metrics_endpoint","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"source":{"description":"Source represents a unique identifier that is used to distinguish where this data is coming from.","title":"source","type":"string"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"traces_endpoint":{"description":"The URL to send traces to. If omitted the Endpoint + \"/v1/traces\" will be used.","title":"traces_endpoint","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.fileexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for file exporter.","properties":{"compression":{"description":"Compression Codec used to export telemetry data\nSupported compression algorithms:`zstd`","title":"compression","type":"string"},"flush_interval":{"description":"FlushInterval is the duration between flushes.\nSee time.ParseDuration for valid values.","title":"flush_interval","type":"string"},"format":{"description":"FormatType define the data format of encoded telemetry data\nOptions:\n- json[default]: OTLP json bytes.\n- proto: OTLP binary protobuf bytes.","title":"format","type":"string"},"path":{"description":"Path of the file to write to. Path is relative to current directory.","title":"path","type":"string"},"rotation":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.fileexporter.Rotation","description":"Rotation defines an option about rotation of telemetry files","title":"rotation"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.fileexporter.Rotation":{"additionalProperties":false,"description":"Rotation an option to rolling log files","properties":{"localtime":{"description":"LocalTime determines if the time used for formatting the timestamps in\nbackup files is the computer's local time. The default is to use UTC\ntime.","title":"localtime","type":"boolean"},"max_backups":{"description":"MaxBackups is the maximum number of old log files to retain. The default\nis to 100 files.","title":"max_backups","type":"integer"},"max_days":{"description":"MaxDays is the maximum number of days to retain old log files based on the\ntimestamp encoded in their filename. Note that a day is defined as 24\nhours and may not exactly correspond to calendar days due to daylight\nsavings, leap seconds, etc. The default is not to remove old log files\nbased on age.","title":"max_days","type":"integer"},"max_megabytes":{"description":"MaxMegabytes is the maximum size in megabytes of the file before it gets\nrotated. 
It defaults to 100 megabytes.","title":"max_megabytes","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlecloudexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Google Cloud exporter.","markdownDescription":"# Google Cloud Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter can be used to send metrics to [Google Cloud Monitoring](https://cloud.google.com/monitoring)\n(formerly Stackdriver), traces to [Google Cloud Trace](https://cloud.google.com/trace),\nand logs to [Google Cloud Logging](https://cloud.google.com/logging).\n\n## Getting started\n\n### Prerequisite: Authenticating\n\nIn general, authenticating with the Collector exporter follows the same steps as\nany other app using the steps documented for [Application Default\nCredentials](https://cloud.google.com/docs/authentication/provide-credentials-adc). This\nsection explains the specific use cases relevant to the exporter.\n\nThe exporter relies on GCP client libraries to send data to Google Cloud. Use of these libraries requires the caller (the Collector) to be authenticated with a GCP account and project. This should be done using a [GCP service account](https://cloud.google.com/compute/docs/access/service-accounts) with at minimum the following IAM roles (depending on the type of data you wish to send):\n\n* [Metrics](https://cloud.google.com/iam/docs/understanding-roles#monitoring-roles): `roles/monitoring.metricWriter`\n* [Traces](https://cloud.google.com/iam/docs/understanding-roles#cloud-trace-roles): `roles/cloudtrace.agent`\n* [Logs](https://cloud.google.com/iam/docs/understanding-roles#logging-roles): `roles/logging.logWriter`\n\nThe [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) has all of these permissions by default, but if you are running on a different platform or with a different GCP service account you will need to ensure your service account has these permissions.\n\n#### Options for different environments\n\nDepending on the environment where your Collector is running, you can authenticate one of several ways:\n\n**GCE instances**\n\nOn GCE it is recommended to use the [GCP service account](https://cloud.google.com/compute/docs/access/service-accounts) associated with your instance. If this is the Compute Engine default service account or another GCP service account with the sufficient IAM permissions, then there is nothing additional you need to do to authenticate the Collector process. Simply run the Collector on your instance, and it will inherit these permissions.\n\n**GKE / Workload Identity**\n\nOn GKE clusters with Workload Identity enabled (including GKE Autopilot), follow [the steps to configure a Workload Identity ServiceAccount in your cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) (if you do not already have one). 
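For illustration only, the deployment step described next might look like the following sketch, where the Pod name, namespace, and ServiceAccount name are hypothetical placeholders:\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  name: otel-collector          # hypothetical Pod name\n  namespace: monitoring         # hypothetical namespace\nspec:\n  serviceAccountName: otel-collector   # the Workload Identity-enabled Kubernetes ServiceAccount\n  containers:\n    - name: otel-collector\n      image: otel/opentelemetry-collector-contrib\n```\n\n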
Then, deploy the Collector as you would with any other workload, setting the `serviceAccountName` field in the Collector Pod’s `.spec` to the WI-enabled ServiceAccount.\n\nIn non-WI clusters, you can use the GCP service account associated with the node the same way as in the instructions for GCE instances above.\n\n**Non-GCP (AWS, Azure, on-prem, etc.) or alternative service accounts**\n\nIn non-GCP environments, a [service account key](https://cloud.google.com/iam/docs/keys-create-delete#iam-service-account-keys-create-console) or credentials file is required. The exporter will automatically look for this file using the `GOOGLE_APPLICATION_CREDENTIALS` environment variable or, if that is unset, one of the [other known locations](https://cloud.google.com/docs/authentication/application-default-credentials). Note that when using this approach, you may need to explicitly set the `project` option in the exporter’s config.\n\nWhen running the Collector in a Docker container, a credentials file can be passed to the container via volume mounts and environment variables at runtime like so:\n\n```\ndocker run \\\n --volume ~/service-account-key.json:/etc/otel/key.json \\\n --volume $(pwd)/config.yaml:/etc/otel/config.yaml \\\n --env GOOGLE_APPLICATION_CREDENTIALS=/etc/otel/key.json \\\n --expose 4317 \\\n --expose 55681 \\\n --rm \\\n otel/opentelemetry-collector-contrib\n```\n\n**Using `gcloud auth application-default login`**\n\nUsing [`gcloud auth application-default login`](https://cloud.google.com/docs/authentication/application-default-credentials) to authenticate is not recommended for production use. Instead, it’s best to use a GCP service account through one of the methods listed above. The `gcloud auth` command can be useful for development and testing on a user account, and authenticating with it follows the same approach as the service account key method above.\n\n\nThese instructions are to get you up and running quickly with the GCP exporter in a local development environment. We'll also point out alternatives that may be more suitable for CI or production.\n\n1. **Obtain a Collector binary.** Pull a binary or Docker image for the\n OpenTelemetry contrib collector which includes the GCP exporter plugin\n through one of the following:\n\n * Download a [binary or package of the OpenTelemetry\n Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-releases/releases)\n that is appropriate for your platform, and includes the Google Cloud\n exporter.\n * Pull a Docker image with `docker pull otel/opentelemetry-collector-contrib`\n * Create your own main package in Go, that pulls in just the plugins you need.\n * Use the [OpenTelemetry Collector\n Builder](https://github.com/open-telemetry/opentelemetry-collector-builder)\n to generate the Go main package and `go.mod`.\n\n2. **Create a configuration file `config.yaml`.** The example below shows a minimal recommended configuration that receives OTLP and sends data to GCP, in addition to verbose logging to help understand what is going on. 
It uses application default credentials (which we will set up in the next step).\n\n Note that this configuration includes the recommended `memory_limiter` and `batch` plugins, which avoid high latency for reporting telemetry, and ensure that the collector itself will stay stable (not run out of memory) by dropping telemetry if needed.\n\n ```yaml\n receivers:\n otlp:\n protocols:\n grpc:\n http:\n exporters:\n googlecloud:\n log:\n default_log_name: opentelemetry.io/collector-exported-log\n processors:\n memory_limiter:\n check_interval: 1s\n limit_percentage: 65\n spike_limit_percentage: 20\n batch:\n resourcedetection:\n detectors: [gcp]\n timeout: 10s\n service:\n pipelines:\n traces:\n receivers: [otlp]\n processors: [memory_limiter, batch]\n exporters: [googlecloud]\n metrics:\n receivers: [otlp]\n processors: [memory_limiter, batch]\n exporters: [googlecloud]\n logs:\n receivers: [otlp]\n processors: [memory_limiter, batch]\n exporters: [googlecloud]\n ```\n\n3. **Set up credentials.**\n\n 1. Enable billing in your GCP project.\n\n 2. Enable the Cloud Metrics and Cloud Trace APIs.\n\n 3. Ensure that your user GCP user has (at minimum) `roles/monitoring.metricWriter` and `roles/cloudtrace.agent`. You can learn about [metric-related](https://cloud.google.com/monitoring/access-control) and [trace-related](https://cloud.google.com/trace/docs/iam) IAM in the GCP documentation.\n\n 4. Obtain credentials using one of the methods in the [Authenticating\n section](#prerequisite-authenticating) above.\n\n\n4. **Run the collector.** The following runs the collector in the foreground, so please execute it in a separate terminal.\n\n ```sh\n ./otelcol-contrib --config=config.yaml\n ```\n\n \u003cdetails\u003e\n \u003csummary\u003eAlternatives\u003c/summary\u003e\n\n If you obtained OS-specific packages or built your own binary in step 1, you'll need to follow the appropriate conventions for running the collector.\n\n \u003c/details\u003e\n\n5. **Gather telemetry.** Run an application that can submit OTLP-formatted metrics and traces, and configure it to send them to `127.0.0.1:4317` (for gRPC) or `127.0.0.1:55681` (for HTTP).\n\n \u003cdetails\u003e\n \u003csummary\u003eAlternatives\u003c/summary\u003e\n\n * Set up the host metrics receiver, which will gather telemetry from the host without needing an external application to submit telemetry.\n\n * Set up an application-specific receiver, such as the Nginx receiver, and run the corresponding application.\n\n * Set up a receiver for some other protocol (such Prometheus, StatsD, Zipkin or Jaeger), and run an application that speaks one of those protocols.\n \u003c/details\u003e\n\n6. **View telemetry in GCP.** Use the GCP [metrics explorer](https://console.cloud.google.com/monitoring/metrics-explorer) and [trace overview](https://console.cloud.google.com/traces) to view your newly submitted telemetry.\n\n## Configuration reference\n\nThe following configuration options are supported:\n\n- `project` (default = Fetch from Credentials): GCP project identifier.\n- `destination_project_quota` (optional): Counts quota for traces and metrics against the project to which the data is sent (as opposed to the project associated with the Collector's service account. For example, when setting `project_id` or using [multi-project export](#multi-project-exporting). (default = false)\n- `user_agent` (default = `opentelemetry-collector-contrib {{version}}`): Override the user agent string sent on requests to Cloud Monitoring (currently only applies to metrics). 
Specify `{{version}}` to include the application version number.\n- `impersonate` (optional): Configuration for service account impersonation.\n - `target_principal`: TargetPrincipal is the email address of the service account to impersonate.\n - `subject`: (optional) Subject is the sub field of a JWT. This field should only be set if you wish to impersonate as a user. This feature is useful when using domain wide delegation.\n - `delegates`: (default = []) Delegates are the service account email addresses in a delegation chain. Each service account must be granted `roles/iam.serviceAccountTokenCreator` on the next service account in the chain.\n- `metric` (optional): Configuration for sending metrics to Cloud Monitoring.\n - `prefix` (default = `workload.googleapis.com`): The prefix to add to metrics.\n - `endpoint` (default = monitoring.googleapis.com): Endpoint where metric data is going to be sent to.\n - `use_insecure` (default = false): If true, disables gRPC client transport security. Only has effect if Endpoint is not \"\".\n - `known_domains` (default = [googleapis.com, kubernetes.io, istio.io, knative.dev]): If a metric belongs to one of these domains, it does not get a prefix.\n - `skip_create_descriptor` (default = false): If set to true, do not send metric descriptors to GCM.\n - `instrumentation_library_labels` (default = true): If true, set the instrumentation_source and instrumentation_version labels.\n - `create_service_timeseries` (default = false): If true, this will send all timeseries using `CreateServiceTimeSeries`. Implicitly, this sets `skip_create_descriptor` to true.\n - `create_metric_descriptor_buffer_size` (default = 10): Buffer size for the channel which asynchronously calls CreateMetricDescriptor.\n - `service_resource_labels` (default = true): If true, the exporter will copy OTel's service.name, service.namespace, and service.instance.id resource attributes into the GCM timeseries metric labels.\n - `resource_filters` (default = []): If provided, resource attributes matching any filter will be included in metric labels. Can be defined by `prefix`, `regex`, or `prefix` AND `regex`.\n - `prefix`: Match resource keys by prefix.\n - `regex`: Match resource keys by regex.\n - `cumulative_normalization` (default = true): If true, normalizes cumulative metrics without start times or with explicit reset points by subtracting subsequent points from the initial point. It is enabled by default. Since it caches starting points, it may result in increased memory usage.\n - `sum_of_squared_deviation` (default = false): If true, enables calculation of an estimated sum of squared deviation. It is an estimate, and is not exact.\n - `compression` (optional): Enable gzip compression for gRPC requests (valid values: `gzip`).\n - `experimental_wal` (default = []): If provided, enables use of a write ahead\n log for time series requests.\n - `directory` (default = `./`): Path to local directory for WAL file.\n - `max_backoff` (default = `1h`): Max duration to retry requests on network\n errors (`UNAVAILABLE` or `DEADLINE_EXCEEDED`).\n- `trace` (optional): Configuration for sending traces to Cloud Trace.\n - `endpoint` (default = cloudtrace.googleapis.com): Endpoint where trace data is going to be sent to.\n - `use_insecure` (default = false): If true, disables gRPC client transport security. Only has effect if Endpoint is not \"\". 
Replaces `use_insecure`.\n - `attribute_mappings` (optional): AttributeMappings determines how to map from OpenTelemetry attribute keys to Google Cloud Trace keys. By default, it changes http and service keys so that they appear more prominently in the UI.\n - `key`: Key is the OpenTelemetry attribute key.\n - `replacement`: Replacement is the attribute sent to Google Cloud Trace.\n- `log` (optional): Configuration for sending logs to Cloud Logging.\n - `endpoint` (default = logging.googleapis.com): Endpoint where log data is going to be sent to.\n - `use_insecure` (default = false): If true, disables gRPC client transport security. Only has effect if Endpoint is not \"\".\n - `default_log_name` (optional): Defines a default name for log entries. If left unset, and a log entry does not have the `gcp.log_name` attribute set, the exporter will return an error processing that entry.\n - `resource_filters` (default = []): If provided, resource attributes matching any filter will be included in log labels. Can be defined by `prefix`, `regex`, or `prefix` AND `regex`.\n - `prefix`: Match resource keys by prefix.\n - `regex`: Match resource keys by regex.\n - `compression` (optional): Enable gzip compression for gRPC requests (valid values: `gzip`).\n- `retry_on_failure` (optional): Configuration for how to handle retries when sending data to Google Cloud fails.\n - `enabled` (default = false)\n - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`\n - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 120s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`\n- `sending_queue` (optional): Configuration for how to buffer telemetry before sending.\n - `enabled` (default = true)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`.\n Users should calculate this as `num_seconds * requests_per_second` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per second.\n For example, to buffer for a 60 second outage at an average of 5 requests per second, set `queue_size` to 300.\n\nNote: The `retry_on_failure` and `sending_queue` settings are provided (and documented) by the [Exporter Helper](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#configuration).\n\nBeyond standard YAML configuration as outlined in the sections that follow,\nexporters that leverage the net/http package (all do today) also respect the\nfollowing proxy environment variables:\n\n* HTTP_PROXY\n* HTTPS_PROXY\n* NO_PROXY\n\nIf set at Collector start time, then exporters, regardless of protocol,\nwill or will not proxy traffic as defined by these environment variables.\n\n### Logging Example\n\nThe logging exporter processes OpenTelemetry log entries and exports them to GCP Cloud Logging. 
Logs can be collected using one \nof the opentelemetry-collector-contrib log receivers, such as the [filelogreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver).\n\nLog entries must contain any Cloud Logging-specific fields as a matching OpenTelemetry attribute (as shown in examples from the\n[logs data model](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#google-cloud-logging)).\nThese attributes can be parsed using the various [log operators](../../pkg/stanza/docs/operators/README.md#what-operators-are-available) available upstream.\n\nFor example, the following config parses the [HTTPRequest field](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#HttpRequest) from Apache log entries saved in `/var/log/apache.log`. \nIt also parses out the `timestamp` and inserts a non-default `log_name` attribute and GCP [MonitoredResource](https://cloud.google.com/logging/docs/reference/v2/rest/v2/MonitoredResource) attribute.\n\n```yaml\nreceivers:\n filelog:\n include: [ /var/log/apache.log ]\n start_at: beginning\n operators:\n - id: http_request_parser\n type: regex_parser\n regex: '(?m)^(?P\u003cremoteIp\u003e[^ ]*) (?P\u003chost\u003e[^ ]*) (?P\u003cuser\u003e[^ ]*) \\[(?P\u003ctime\u003e[^\\]]*)\\] \"(?P\u003crequestMethod\u003e\\S+)(?: +(?P\u003crequestUrl\u003e[^\\\"]*?)(?: +(?P\u003cprotocol\u003e\\S+))?)?\" (?P\u003cstatus\u003e[^ ]*) (?P\u003cresponseSize\u003e[^ ]*)(?: \"(?P\u003creferer\u003e[^\\\"]*)\" \"(?P\u003cuserAgent\u003e[^\\\"]*)\")?$'\n parse_to: attributes[\"gcp.http_request\"]\n timestamp:\n parse_from: attributes[\"gcp.http_request\"].time\n layout_type: strptime\n layout: '%d/%b/%Y:%H:%M:%S %z'\n converter:\n max_flush_count: 100\n flush_interval: 100ms\n\nexporters:\n googlecloud:\n project: my-gcp-project\n log:\n default_log_name: opentelemetry.io/collector-exported-log\n\nprocessors:\n memory_limiter:\n check_interval: 1s\n limit_percentage: 65\n spike_limit_percentage: 20\n resourcedetection:\n detectors: [gcp]\n timeout: 10s\n attributes:\n # Override the default log name. 
`gcp.log_name` takes precedence\n # over the `default_log_name` specified in the exporter.\n actions:\n - key: gcp.log_name\n action: insert\n value: apache-access-log\n\nservice:\n logs:\n receivers: [filelog]\n processors: [memory_limiter, resourcedetection, attributes]\n exporters: [googlecloud]\n\n```\n\nThis would parse logs of the following example structure:\n\n```\n127.0.0.1 - - [26/Apr/2022:22:53:36 +0800] \"GET / HTTP/1.1\" 200 1247\n```\n\nTo the following GCP entry structure:\n\n```\n {\n \"logName\": \"projects/my-gcp-project/logs/apache-access-log\",\n \"resource\": {\n \"type\": \"gce_instance\",\n \"labels\": {\n \"instance_id\": \"\",\n \"zone\": \"\"\n }\n },\n \"textPayload\": \"127.0.0.1 - - [26/Apr/2022:22:53:36 +0800] \\\"GET / HTTP/1.1\\\" 200 1247\",\n \"timestamp\": \"2022-05-02T12:16:14.574548493Z\",\n \"httpRequest\": {\n \"requestMethod\": \"GET\",\n \"requestUrl\": \"/\",\n \"status\": 200,\n \"responseSize\": \"1247\",\n \"remoteIp\": \"127.0.0.1\",\n \"protocol\": \"HTTP/1.1\"\n }\n }\n```\n\nThe logging exporter also supports the full range of [GCP log severity levels](https://cloud.google.com/logging/docs/reference/v2/rpc/google.logging.type#google.logging.type.LogSeverity), \nwhich differ from the available [OpenTelemetry log severity levels](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#severity-fields). \nTo accommodate this, the following mapping is used to equate an incoming OpenTelemetry [`SeverityNumber`](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber) \nto a matching GCP log severity:\n\n|OTel `SeverityNumber`/Name|GCP severity level|\n|---|---|\n|Undefined|Default|\n|1-4 / Trace|Debug|\n|5-8 / Debug|Debug|\n|9-10 / Info|Info|\n|11-12 / Info|Notice|\n|13-16 / Warn|Warning|\n|17-20 / Error|Error|\n|21-22 / Fatal|Critical|\n|23 / Fatal|Alert|\n|24 / Fatal|Emergency|\n\nThe upstream [severity parser](../../pkg/stanza/docs/types/severity.md) (along\nwith the [regex parser](../../pkg/stanza/docs/operators/regex_parser.md)) allows for\nadditional flexibility in parsing log severity from incoming entries.\n\n## Multi-Project exporting\n\nBy default, the exporter sends telemetry to the project specified by `project` in the configuration. This can be overridden on a per-metrics basis using the `gcp.project.id` resource attribute. For example, if a metric has a label `project`, you could use the `groupbyattrs` processor to promote it to a resource label, and the `resource` processor to rename the attribute from `project` to `gcp.project.id`.\n\n### Multi-Project quota usage\n\nThe `gcp.project.id` label can be combined with the `destination_project_quota` option to attribute quota usage to the project parsed by the label. This feature is currently only available\nfor traces and metrics. The Collector's default service account will need `roles/serviceusage.serviceUsageConsumer` IAM permissions in the destination quota project.\n\nNote that this option will not work if a quota project is already defined in your Collector's GCP credentials. 
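As a hedged sketch of the Multi-Project exporting setup described above (the `project` metric label is only an example name), the `groupbyattrs` and `resource` processors might be configured like this:\n\n```yaml\nprocessors:\n  groupbyattrs:\n    keys:\n      - project              # promote the metric label to a resource attribute\n  resource:\n    attributes:\n      - key: gcp.project.id  # rename it to the attribute the exporter reads\n        from_attribute: project\n        action: upsert\n      - key: project         # drop the original attribute\n        action: delete\n```\n\nNote again that this quota attribution will not work if a quota project is already defined in the Collector's GCP credentials. 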
In this case, the telemetry will fail to export with a \"project not found\" error.\nThis can be avoided by manually editing your [ADC file](https://cloud.google.com/docs/authentication/application-default-credentials#personal) (if it exists) to remove the `quota_project_id` entry line.\n\n## Features and Feature-Gates\n\nSee the [Collector feature gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md#collector-feature-gates) for an overview of feature gates in the collector.","properties":{"destination_project_quota":{"title":"destination_project_quota","type":"boolean"},"impersonate":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.ImpersonateConfig","title":"impersonate"},"log":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.LogConfig","title":"log"},"metric":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.MetricConfig","title":"metric"},"project":{"title":"project","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"trace":{"$ref":"#/$defs/github.com.GoogleCloudPlatform.opentelemetry-operations-go.exporter.collector.TraceConfig","title":"trace"},"user_agent":{"title":"user_agent","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlecloudpubsubexporter.Config":{"additionalProperties":false,"properties":{"compression":{"description":"Compression of the payload (only gzip is supported; no compression is the default)","title":"compression","type":"string"},"project":{"description":"Google Cloud Project ID where the Pubsub client will connect to","title":"project","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"topic":{"description":"The fully qualified resource name of the Pubsub topic","title":"topic","type":"string"},"user_agent":{"description":"User agent that will be used by the Pubsub client to connect to the service","title":"user_agent","type":"string"},"watermark":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlecloudpubsubexporter.WatermarkConfig","description":"Watermark defines the watermark (the ce-time attribute on the message) behavior","title":"watermark"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlecloudpubsubexporter.WatermarkConfig":{"additionalProperties":false,"description":"WatermarkConfig customizes the behavior of the watermark","properties":{"allowed_drift":{"description":"Indication of how much the timestamp can drift from the current time; the timestamp will be capped to the allowed\nmaximum. A duration of 0 is the same as maximum duration","title":"allowed_drift","type":"string"},"behavior":{"description":"Behavior of the watermark. 
One of none, earliest, or current (current being the default); unless set to none, the watermark will set the timestamp\non the pubsub message based on the timestamps of the events inside the message","title":"behavior","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlemanagedprometheusexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Google Cloud Managed Service for Prometheus exporter.","markdownDescription":"# Google Managed Service for Prometheus Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter can be used to send metrics to [Google Cloud Managed Service for Prometheus](https://cloud.google.com/stackdriver/docs/managed-prometheus). The difference between this exporter and the `googlecloud` exporter is that metrics sent with this exporter are queried using [promql](https://prometheus.io/docs/prometheus/latest/querying/basics/#querying-prometheus), rather than the standard MQL.\n\nThis exporter is not the standard method of ingesting metrics into Google Cloud Managed Service for Prometheus, which is built on a drop-in replacement for the Prometheus server: https://github.com/GoogleCloudPlatform/prometheus. This exporter does not support the full range of Prometheus functionality, including the UI, recording and alerting rules, and can't be used with the GMP Operator, but does support sending metrics.\n\n## Configuration Reference\n\nThe following configuration options are supported:\n\n- `project` (optional): GCP project identifier.\n- `user_agent` (optional): Override the user agent string sent on requests to Cloud Monitoring (currently only applies to metrics). Specify `{{version}}` to include the application version number. Defaults to `opentelemetry-collector-contrib {{version}}`.\n- `metric` (optional): Configuration for sending metrics to Cloud Monitoring.\n - `endpoint` (optional): Endpoint where metric data is going to be sent to. Replaces `endpoint`.\n - `use_insecure` (optional): If true, disables gRPC client transport security. 
Only has effect if Endpoint is not \"\".\n- `retry_on_failure` (optional): Configuration for how to handle retries when sending data to Google Cloud fails.\n - `enabled` (default = false)\n - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`\n - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 120s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`\n- `sending_queue` (optional): Configuration for how to buffer traces before sending.\n - `enabled` (default = true)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before data; ignored if `enabled` is `false`;\n User should calculate this as `num_seconds * requests_per_second` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per seconds.\n\nNote: These `retry_on_failure` and `sending_queue` are provided (and documented) by the [Exporter Helper](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#configuration)\n\n## Example Configuration\n\n```yaml\nreceivers:\n prometheus:\n config:\n scrape_configs:\n # Add your prometheus scrape configuration here.\n # Using kubernetes_sd_configs with namespaced resources (e.g. pod)\n # ensures the namespace is set on your metrics.\n - job_name: 'kubernetes-pods'\n kubernetes_sd_configs:\n - role: pod\n relabel_configs:\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]\n action: keep\n regex: true\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]\n action: replace\n target_label: __metrics_path__\n regex: (.+)\n - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]\n action: replace\n regex: (.+):(?:\\d+);(\\d+)\n replacement: $$1:$$2\n target_label: __address__\n - action: labelmap\n regex: __meta_kubernetes_pod_label_(.+)\nprocessors:\n batch:\n # batch metrics before sending to reduce API usage\n send_batch_max_size: 200\n send_batch_size: 200\n timeout: 5s\n memory_limiter:\n # drop metrics if memory usage gets too high\n check_interval: 1s\n limit_percentage: 65\n spike_limit_percentage: 20\n resourcedetection:\n # detect cluster name and location\n detectors: [gcp]\n timeout: 10s\n transform:\n # \"location\", \"cluster\", \"namespace\", \"job\", \"instance\", and \"project_id\" are reserved, and \n # metrics containing these labels will be rejected. 
Prefix them with exported_ to prevent this.\n metric_statements:\n - context: datapoint\n statements:\n - set(attributes[\"exported_location\"], attributes[\"location\"])\n - delete_key(attributes, \"location\")\n - set(attributes[\"exported_cluster\"], attributes[\"cluster\"])\n - delete_key(attributes, \"cluster\")\n - set(attributes[\"exported_namespace\"], attributes[\"namespace\"])\n - delete_key(attributes, \"namespace\")\n - set(attributes[\"exported_job\"], attributes[\"job\"])\n - delete_key(attributes, \"job\")\n - set(attributes[\"exported_instance\"], attributes[\"instance\"])\n - delete_key(attributes, \"instance\")\n - set(attributes[\"exported_project_id\"], attributes[\"project_id\"])\n - delete_key(attributes, \"project_id\")\n\nexporters:\n googlemanagedprometheus:\n\nservice:\n pipelines:\n metrics:\n receivers: [prometheus]\n processors: [batch, memory_limiter, transform, resourcedetection]\n exporters: [googlemanagedprometheus]\n```\n\n## Resource Attribute Handling\n\nThe Google Managed Prometheus exporter maps metrics to the\n[prometheus_target](https://cloud.google.com/monitoring/api/resources#tag_prometheus_target)\nmonitored resource. The logic for mapping to monitored resources is designed to\nbe used with the prometheus receiver, but can be used with other receivers as\nwell. To avoid collisions (i.e. \"duplicate timeseries enountered\" errors), you\nneed to ensure the prometheus_target resource uniquely identifies the source of\nmetrics. The exporter uses the following resource attributes to determine\nmonitored resource:\n\n* location: [`location`, `cloud.availability_zone`, `cloud.region`]\n* cluster: [`cluster`, `k8s.cluster.name`]\n* namespace: [`namespace`, `k8s.namespace.name`]\n* job: [`service.name` + `service.namespace`]\n* instance: [`service.instance.id`]\n\nIn the configuration above, `cloud.availability_zone`, `cloud.region`, and\n`k8s.cluster.name` are detected using the `resourcedetection` processor with\nthe `gcp` detector. The prometheus receiver sets `service.name` to the\nconfigured `job_name`, and `service.instance.id` is set to the scrape target's\n`instance`. The prometheus receiver sets `k8s.namespace.name` when using\n`role: pod`.\n\n### Manually Setting location, cluster, or namespace\n\nIn GMP, the above attributes are used to identify the `prometheus_target`\nmonitored resource. As such, it is recommended to avoid writing metric or resource labels\nthat match these keys. Doing so can cause errors when exporting metrics to\nGMP or when trying to query from GMP. So, the recommended way to set them\nis with the [resourcedetection processor](../../processor/resourcedetectionprocessor).\n\nIf you still need to set `location`, `cluster`, or `namespace` labels\n(such as when running in non-GCP environments), you can do so with the\n[resource processor](../../processor/resourceprocessor) like so:\n\n```yaml\nprocessors:\n resource:\n attributes:\n - key: \"location\"\n value: \"us-east-1\"\n action: upsert\n```\n\n### Setting cluster, location or namespace using metric labels\n\nThis example copies the `location` metric attribute to a new `exported_location`\nattribute, then deletes the original `location`. It is recommended to use the `exported_*`\nprefix, which is consistent with GMP's behavior.\n\nYou can also use the [groupbyattrs processor](../../processor/groupbyattrsprocessor)\nto move metric labels to resource labels. 
This is useful in situations\nwhere, for example, an exporter monitors multiple namespaces (with\neach namespace exported as a metric label). One such example is kube-state-metrics.\n\nUsing `groupbyattrs` will promote that label to a resource label and \nassociate those metrics with the new resource. For example:\n\n```yaml\nprocessors:\n groupbyattrs:\n keys:\n - namespace\n - cluster\n - location\n```","properties":{"metric":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlemanagedprometheusexporter.MetricConfig","title":"metric"},"project":{"title":"project","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"user_agent":{"title":"user_agent","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlemanagedprometheusexporter.MetricConfig":{"additionalProperties":false,"properties":{"GetClientOptions":{"title":"GetClientOptions"},"compression":{"title":"compression","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"grpc_pool_size":{"title":"grpc_pool_size","type":"integer"},"prefix":{"description":"Prefix configures the prefix of metrics sent to GoogleManagedPrometheus. Defaults to prometheus.googleapis.com.\nChanging this prefix is not recommended, as it may cause metrics to not be queryable with promql in the Cloud Monitoring UI.","title":"prefix","type":"string"},"use_insecure":{"title":"use_insecure","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.influxdbexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for the InfluxDB exporter.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"bucket":{"description":"Bucket is the InfluxDB bucket name that telemetry will be written to.","title":"bucket","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only 
if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics_schema":{"description":"MetricsSchema indicates the metrics schema to emit to line protocol.\nOptions:\n- telegraf-prometheus-v1\n- telegraf-prometheus-v2","title":"metrics_schema","type":"string"},"org":{"description":"Org is the InfluxDB organization name of the destination bucket.","title":"org","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"span_dimensions":{"description":"SpanDimensions are span attributes to be used as line protocol tags.\nThese are always included as tags:\n- trace ID\n- span ID\nThe default values are strongly recommended for use with Jaeger:\n- service.name\n- span.name\nOther common attributes can be found here:\n- https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv","items":{"type":"string"},"title":"span_dimensions","type":"array"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"token":{"description":"Token is used to identify InfluxDB permissions within the organization.","title":"token","type":"string"},"v1_compatibility":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.influxdbexporter.V1Compatibility","description":"V1Compatibility is used to specify if the exporter should use the v1.X InfluxDB API schema.","title":"v1_compatibility"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.influxdbexporter.V1Compatibility":{"additionalProperties":false,"description":"V1Compatibility is used to specify if the exporter should use the v1.X InfluxDB API schema.","markdownDescription":"# InfluxDB Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending tracing, metrics, and logging data to [InfluxDB](https://www.influxdata.com/products/).\n\n## Configuration\n\nThe following configuration options are supported:\n\n* `endpoint` (required) HTTP/S destination for line protocol\n - if path is set to root (/) or is unspecified, it will be changed to /api/v2/write.\n* `timeout` (default = 5s) Timeout for requests\n* `headers`: (optional) additional headers attached to each HTTP request\n - header `User-Agent` is `OpenTelemetry -\u003e Influx` by default\n - if `token` (below) is set, then header `Authorization` will overridden with the given token\n* `org` (required) Name of InfluxDB organization that owns the destination bucket\n* `bucket` (required) name of InfluxDB bucket to which signals will be written\n* `token` (optional) The authentication token for InfluxDB\n* `v1_compatibility` (optional) Options for exporting to InfluxDB v1.x\n * `enabled` (optional) Use InfluxDB v1.x API if enabled\n * `db` (required if enabled) Name of the InfluxDB database to which signals will be written\n * `username` (optional) Basic auth username for authenticating with InfluxDB v1.x\n * `password` (optional) Basic auth password for authenticating with InfluxDB v1.x\n* `span_dimensions` (default = service.name, span.name) Span attributes to use as dimensions (InfluxDB tags)\n* `metrics_schema` (default = telegraf-prometheus-v1) The chosen metrics schema to write; must be one of:\n * `telegraf-prometheus-v1`\n * `telegraf-prometheus-v2`\n* `sending_queue` [details here](https://github.com/open-telemetry/opentelemetry-collector/blob/v0.25.0/exporter/exporterhelper/README.md#configuration)\n * `enabled` (default = true)\n * `num_consumers` (default = 10) The number of consumers from the queue\n * `queue_size` (default = 1000) Maximum number of batches allowed in queue at a given time\n* `retry_on_failure` [details here](https://github.com/open-telemetry/opentelemetry-collector/blob/v0.25.0/exporter/exporterhelper/README.md#configuration)\n * `enabled` (default = true)\n * `initial_interval` (default = 5s) Time to wait after the first failure before retrying\n * `max_interval` (default = 30s) Upper bound on backoff interval\n * `max_elapsed_time` (default = 120s) Maximum amount of time (including retries) spent trying to send a request/batch\n\nThe full list of settings exposed for this exporter are documented in [config.go](config.go).\n\nExample:\n```yaml\nexporters:\n influxdb:\n endpoint: http://localhost:8080\n timeout: 500ms\n org: my-org\n bucket: my-bucket\n token: my-token\n span_dimensions:\n - service.name\n - span.name\n metrics_schema: 
telegraf-prometheus-v1\n\n sending_queue:\n enabled: true\n num_consumers: 3\n queue_size: 10\n\n retry_on_failure:\n enabled: true\n initial_interval: 1s\n max_interval: 3s\n max_elapsed_time: 10s\n```\n\n## Definitions\n\n[InfluxDB](https://www.influxdata.com/products/influxdb/) is an open-source time series database.\n\n[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is an open-source metrics agent, similar to the OpenTelemetry Collector.\nTelegraf has [hundreds of plugins](https://www.influxdata.com/products/integrations/?_integrations_dropdown=telegraf-plugins).\n\n[Line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) is a textual HTTP payload format used to move metrics between Telegraf agents and InfluxDB instances.\n\n## Schema\n\nThe OpenTelemetry-\u003eInfluxDB conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/otel2influx) are hosted at https://github.com/influxdata/influxdb-observability .\n\nSpans are stored in measurement `spans`.\nMetric points through `metrics_schema=telegraf-prometheus-v1` are assigned measurement from the OTel field `Metric.name`.\nMetric points through `metrics_schema=telegraf-prometheus-v2` are stored in measurement `prometheus`.\nLogs are stored in measurement `logs`.\n\n### Example: Tracing Spans\n```\nspans end_time_unix_nano=\"2021-02-19 20:50:25.6893952 +0000 UTC\",instrumentation_library_name=\"tracegen\",kind=\"SPAN_KIND_INTERNAL\",name=\"okey-dokey\",net.peer.ip=\"1.2.3.4\",parent_span_id=\"d5270e78d85f570f\",peer.service=\"tracegen-client\",service.name=\"tracegen\",span.kind=\"server\",span_id=\"4c28227be6a010e1\",status_code=\"STATUS_CODE_OK\",trace_id=\"7d4854815225332c9834e6dbf85b9380\" 1613767825689169000\nspans end_time_unix_nano=\"2021-02-19 20:50:25.6893952 +0000 UTC\",instrumentation_library_name=\"tracegen\",kind=\"SPAN_KIND_INTERNAL\",name=\"lets-go\",net.peer.ip=\"1.2.3.4\",peer.service=\"tracegen-server\",service.name=\"tracegen\",span.kind=\"client\",span_id=\"d5270e78d85f570f\",status_code=\"STATUS_CODE_OK\",trace_id=\"7d4854815225332c9834e6dbf85b9380\" 1613767825689135000\nspans end_time_unix_nano=\"2021-02-19 20:50:25.6895667 +0000 UTC\",instrumentation_library_name=\"tracegen\",kind=\"SPAN_KIND_INTERNAL\",name=\"okey-dokey\",net.peer.ip=\"1.2.3.4\",parent_span_id=\"b57e98af78c3399b\",peer.service=\"tracegen-client\",service.name=\"tracegen\",span.kind=\"server\",span_id=\"a0643a156d7f9f7f\",status_code=\"STATUS_CODE_OK\",trace_id=\"fd6b8bb5965e726c94978c644962cdc8\" 1613767825689388000\nspans end_time_unix_nano=\"2021-02-19 20:50:25.6895667 +0000 UTC\",instrumentation_library_name=\"tracegen\",kind=\"SPAN_KIND_INTERNAL\",name=\"lets-go\",net.peer.ip=\"1.2.3.4\",peer.service=\"tracegen-server\",service.name=\"tracegen\",span.kind=\"client\",span_id=\"b57e98af78c3399b\",status_code=\"STATUS_CODE_OK\",trace_id=\"fd6b8bb5965e726c94978c644962cdc8\" 1613767825689303300\nspans end_time_unix_nano=\"2021-02-19 20:50:25.6896741 +0000 UTC\",instrumentation_library_name=\"tracegen\",kind=\"SPAN_KIND_INTERNAL\",name=\"okey-dokey\",net.peer.ip=\"1.2.3.4\",parent_span_id=\"6a8e6a0edcc1c966\",peer.service=\"tracegen-client\",service.name=\"tracegen\",span.kind=\"server\",span_id=\"d68f7f3b41eb8075\",status_code=\"STATUS_CODE_OK\",trace_id=\"651dadde186b7834c52b13a28fc27bea\" 1613767825689480300\n```\n\n### Example: Metrics - 
`telegraf-prometheus-v1`\n```\ncpu_temp,foo=bar gauge=87.332\nhttp_requests_total,method=post,code=200 counter=1027\nhttp_requests_total,method=post,code=400 counter=3\nhttp_request_duration_seconds 0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,sum=53423,count=144320,min=0,max=10\nrpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560473e+07,count=2693\n```\n\n### Example: Metrics - `telegraf-prometheus-v2`\n```\nprometheus,foo=bar cpu_temp=87.332\nprometheus,method=post,code=200 http_requests_total=1027\nprometheus,method=post,code=400 http_requests_total=3\nprometheus,le=0.05 http_request_duration_seconds_bucket=24054\nprometheus,le=0.1 http_request_duration_seconds_bucket=33444\nprometheus,le=0.2 http_request_duration_seconds_bucket=100392\nprometheus,le=0.5 http_request_duration_seconds_bucket=129389\nprometheus,le=1 http_request_duration_seconds_bucket=133988\nprometheus http_request_duration_seconds_count=144320,http_request_duration_seconds_sum=53423,http_request_duration_seconds_min=0,http_request_duration_seconds_max=100\nprometheus,quantile=0.01 rpc_duration_seconds=3102\nprometheus,quantile=0.05 rpc_duration_seconds=3272\nprometheus,quantile=0.5 rpc_duration_seconds=4773\nprometheus,quantile=0.9 rpc_duration_seconds=9001\nprometheus,quantile=0.99 rpc_duration_seconds=76656\nprometheus rpc_duration_seconds_count=1.7560473e+07,rpc_duration_seconds_sum=2693\n```\n\n### Example: Logs\n```\nlogs fluent.tag=\"fluent.info\",pid=18i,ppid=9i,worker=0i 1613769568895331700\nlogs fluent.tag=\"fluent.debug\",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200\nlogs fluent.tag=\"fluent.info\",worker=0i 1613769568896515100\n```","properties":{"db":{"description":"DB is used to specify the name of the V1 InfluxDB database that telemetry will be written to.","title":"db","type":"string"},"enabled":{"description":"Enabled is used to specify if the exporter should use the v1.X InfluxDB API schema","title":"enabled","type":"boolean"},"password":{"description":"Password is used to optionally specify the basic auth password","title":"password","type":"string"},"username":{"description":"Username is used to optionally specify the basic auth username","title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.instanaexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for the Instana exporter","markdownDescription":"# Instana Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces |\n| Distributions | [contrib] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThe Instana Exporter converts OpenTelemetry trace data and then sends it to the [Instana Backend](https://www.ibm.com/docs/en/instana-observability/current?topic=setting-up-managing-instana).\n\n## Exporter Configuration\n\nThe following exporter configuration parameters are supported.\n\n\n| Parameter | Description |\n|----------------|-------------|\n| endpoint | The Instana backend endpoint that the Exporter connects to. It depends on your region and how it is hosted. It starts with ``https://serverless-`` for SaaS. Otherwise, it starts with ``https://``. 
It corresponds to the Instana environment variable ``INSTANA_ENDPOINT_URL`` |\n| agent_key | Your Instana Agent key. The same agent key can be used for host agents and serverless monitoring. It corresponds to the Instana environment variable ``INSTANA_AGENT_KEY`` |\n| tls/ca_file | [Optional] Certificate authority file for an Instana backend connection where the backend uses a self-signed certificate. |\n\n\u003e These parameters match the Instana Serverless Monitoring environment variables and can be found [here](https://www.ibm.com/docs/en/instana-observability/current?topic=references-environment-variables#serverless-monitoring).\n\n\n### Sample Configuration\n\nThe code snippet below shows what your configuration file should look like:\n\n```yaml\n[...]\n\nexporters:\n instana:\n endpoint: ${env:INSTANA_ENDPOINT_URL}\n agent_key: ${env:INSTANA_AGENT_KEY}\n\n[...]\n\nservice:\n pipelines:\n traces:\n exporters: [instana]\n\n[...]\n```\n\n### Full Example\n\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n http:\n\nprocessors:\n batch:\nexporters:\n logging:\n loglevel: debug\n instana:\n endpoint: ${env:INSTANA_ENDPOINT_URL}\n agent_key: ${env:INSTANA_AGENT_KEY}\n tls:\n ca_file: someCA.pem # Optional. Certificate authority file for Instana backend connection.\n\nservice:\n pipelines:\n traces:\n receivers: [otlp]\n processors: [batch]\n exporters: [instana]\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"agent_key":{"title":"agent_key","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.jaegerexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Jaeger gRPC exporter.","markdownDescription":"# Deprecated Jaeger gRPC Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [deprecated]: traces |\n| Distributions | [core], [contrib] |\n\n[deprecated]: https://github.com/open-telemetry/opentelemetry-collector#deprecated\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter is being deprecated and will be removed in July 2023 as Jaeger supports OTLP directly. \n\nExports data via gRPC to [Jaeger](https://www.jaegertracing.io/) destinations.\nBy default, this exporter requires TLS and offers queued retry capabilities.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): host:port to which the exporter is going to send Jaeger trace data,\nusing the gRPC protocol. The valid syntax is described\n[here](https://github.com/grpc/grpc/blob/master/doc/naming.md)\n\nBy default, TLS is enabled and must be configured under `tls:`:\n\n- `insecure` (default = `false`): whether to enable client transport security for\n the exporter's connection.\n\nAs a result, the following parameters are also required under `tls:`:\n\n- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n- `key_file` (no default): path to the TLS key to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n\nExample:\n\n```yaml\nexporters:\n jaeger:\n endpoint: jaeger-all-in-one:14250\n tls:\n cert_file: file.cert\n key_file: file.key\n jaeger/2:\n endpoint: jaeger-all-in-one:14250\n tls:\n insecure: true\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. 
Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC client. See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.jaegerthrifthttpexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Jaeger Thrift over HTTP exporter.","markdownDescription":"# Deprecated Jaeger Thrift Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [deprecated]: traces |\n| Distributions | [contrib] |\n\n[deprecated]: https://github.com/open-telemetry/opentelemetry-collector#deprecated\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter is being deprecated and will be removed in July 2023 as Jaeger supports OTLP directly.\n\nThis exporter supports sending trace data to [Jaeger](https://www.jaegertracing.io) over Thrift HTTP.\n\n*WARNING:* The [Jaeger gRPC Exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter) is the recommended one for exporting traces from an OpenTelemetry Collector to Jaeger. 
This Jaeger Thrift Exporter should only be used to export traces to a Jaeger Collector that is unable to expose the [gRPC API](https://www.jaegertracing.io/docs/1.27/apis/#protobuf-via-grpc-stable).\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (no default): target to which the exporter is going to send Jaeger trace data,\nusing the Thrift HTTP protocol.\n\nThe following settings can be optionally configured:\n\n- `timeout` (default = 5s): the maximum time to wait for a HTTP request to complete\n- `headers` (no default): headers to be added to the HTTP request\n\nExample:\n\n```yaml\nexporters:\n jaeger_thrift:\n endpoint: \"http://jaeger.example.com/api/traces\"\n timeout: 2s\n headers:\n added-entry: \"added value\"\n dot.test: test\n```\n\nThe full list of settings exposed for this exporter are documented [here](config.go)\nwith detailed sample configurations [here](testdata/config.yaml).\n\nThis exporter also offers proxy support as documented\n[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support).","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.AWSMSKConfig":{"additionalProperties":false,"description":"AWSMSKConfig defines the additional SASL authentication measures needed to use AWS_MSK_IAM mechanism","properties":{"broker_addr":{"description":"BrokerAddr is the client is connecting to in order to perform the auth required","title":"broker_addr","type":"string"},"region":{"description":"Region is the AWS region the MSK cluster is based in","title":"region","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Authentication":{"additionalProperties":false,"description":"Authentication defines authentication.","markdownDescription":"# Kafka Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [aws], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nKafka exporter exports logs, metrics, and traces to Kafka. This exporter uses a synchronous producer\nthat blocks and does not batch messages, therefore it should be used with batch and queued retry\nprocessors for higher throughput and resiliency. Message payload encoding is configurable.\n\nThe following settings are required:\n- `protocol_version` (no default): Kafka protocol version e.g. 2.0.0\n\nThe following settings can be optionally configured:\n- `brokers` (default = localhost:9092): The list of kafka brokers\n- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to export to.\n- `encoding` (default = otlp_proto): The encoding of the traces sent to kafka. All available encodings:\n - `otlp_proto`: payload is Protobuf serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs.\n - `otlp_json`: ** EXPERIMENTAL ** payload is JSON serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. 
\n - The following encodings are valid *only* for **traces**.\n - `jaeger_proto`: the payload is serialized to a single Jaeger proto `Span`, and keyed by TraceID.\n - `jaeger_json`: the payload is serialized to a single Jaeger JSON Span using `jsonpb`, and keyed by TraceID.\\\n - The following encodings are valid *only* for **logs**.\n - `raw`: if the log record body is a byte array, it is sent as is. Otherwise, it is serialized to JSON. Resource and record attributes are discarded.\n- `auth`\n - `plain_text`\n - `username`: The username to use.\n - `password`: The password to use.\n - `sasl`\n - `username`: The username to use.\n - `password`: The password to use.\n - `mechanism`: The SASL mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM or PLAIN)\n - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM mechanism\n - `aws_msk.broker_addr`: MSK Broker address in case of AWS_MSK_IAM mechanism\n - `tls`\n - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should\n only be used if `insecure` is set to false.\n - `cert_file`: path to the TLS cert to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n - `key_file`: path to the TLS key to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n - `insecure` (default = false): Disable verifying the server's certificate chain and host \n name (`InsecureSkipVerify` in the tls config)\n - `server_name_override`: ServerName indicates the name of the server requested by the client\n in order to support virtual hosting.\n - `kerberos`\n - `service_name`: Kerberos service name\n - `realm`: Kerberos realm\n - `use_keytab`: If true, the keytab file will be used for authentication instead of the password\n - `username`: The Kerberos username used to authenticate with the KDC\n - `password`: The Kerberos password used to authenticate with the KDC\n - `config_file`: Path to Kerberos configuration, e.g. /etc/krb5.conf\n - `keytab_file`: Path to keytab file, e.g. /etc/security/kafka.keytab\n- `metadata`\n - `full` (default = true): Whether to maintain a full set of metadata. 
\n When disabled the client does not make the initial request to broker at the startup.\n - `retry`\n - `max` (default = 3): The number of retries to get metadata\n - `backoff` (default = 250ms): How long to wait between metadata retries\n- `timeout` (default = 5s): Is the timeout for every attempt to send data to the backend.\n- `retry_on_failure`\n - `enabled` (default = true)\n - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`\n - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 120s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`\n- `sending_queue`\n - `enabled` (default = true)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping data; ignored if `enabled` is `false`;\n User should calculate this as `num_seconds * requests_per_second` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per seconds.\n- `producer`\n - `max_message_bytes` (default = 1000000) the maximum permitted size of a message in bytes\n - `required_acks` (default = 1) controls when a message is regarded as transmitted. https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#RequiredAcks\n - `compression` (default = 'none') the compression used when producing messages to kafka. The options are: `none`, `gzip`, `snappy`, `lz4`, and `zstd` https://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#CompressionCodec\n - `flush_max_messages` (default = 0) The maximum number of messages the producer will send in a single broker request.\n\nExample configuration:\n\n```yaml\nexporters:\n kafka:\n brokers:\n - localhost:9092\n protocol_version: 2.0.0\n```","properties":{"kerberos":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.KerberosConfig","title":"kerberos"},"plain_text":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.PlainTextConfig","title":"plain_text"},"sasl":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.SASLConfig","title":"sasl"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Kafka exporter.","properties":{"auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Authentication","description":"Authentication defines used authentication mechanism.","title":"auth"},"brokers":{"description":"The list of kafka brokers (default localhost:9092)","items":{"type":"string"},"title":"brokers","type":"array"},"encoding":{"description":"Encoding of messages (default \"otlp_proto\")","title":"encoding","type":"string"},"metadata":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Metadata","description":"Metadata is the namespace for metadata management properties used by the\nClient, and shared by the 
Producer/Consumer.","title":"metadata"},"producer":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Producer","description":"Producer is the namespace for producer properties used only by the Producer","title":"producer"},"protocol_version":{"description":"Kafka protocol version","title":"protocol_version","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"topic":{"description":"The name of the kafka topic to export to (default otlp_spans for traces, otlp_metrics for metrics)","title":"topic","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.KerberosConfig":{"additionalProperties":false,"description":"KerberosConfig defines kerberos configuration.","properties":{"config_file":{"title":"config_file","type":"string"},"keytab_file":{"title":"keytab_file","type":"string"},"password":{"title":"password","type":"string"},"realm":{"title":"realm","type":"string"},"service_name":{"title":"service_name","type":"string"},"use_keytab":{"title":"use_keytab","type":"boolean"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Metadata":{"additionalProperties":false,"description":"Metadata defines configuration for retrieving metadata from the broker.","properties":{"full":{"description":"Whether to maintain a full set of metadata for all topics, or just\nthe minimal set that has been necessary so far. The full set is simpler\nand usually more convenient, but can take up a substantial amount of\nmemory if you have many topics and partitions. Defaults to true.","title":"full","type":"boolean"},"retry":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.MetadataRetry","description":"Retry configuration for metadata.\nThis configuration is useful to avoid race conditions when broker\nis starting at the same time as collector.","title":"retry"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.MetadataRetry":{"additionalProperties":false,"description":"MetadataRetry defines retry configuration for Metadata.","properties":{"backoff":{"description":"How long to wait for leader election to occur before retrying\n(default 250ms). 
Similar to the JVM's `retry.backoff.ms`.","title":"backoff","type":"string"},"max":{"description":"The total number of times to retry a metadata request when the\ncluster is in the middle of a leader election or at startup (default 3).","title":"max","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.PlainTextConfig":{"additionalProperties":false,"description":"PlainTextConfig defines plaintext authentication.","properties":{"password":{"title":"password","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Producer":{"additionalProperties":false,"description":"Producer defines configuration for producer","properties":{"compression":{"description":"Compression Codec used to produce messages\nhttps://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#CompressionCodec\nThe options are: 'none', 'gzip', 'snappy', 'lz4', and 'zstd'","title":"compression","type":"string"},"flush_max_messages":{"description":"The maximum number of messages the producer will send in a single\nbroker request. Defaults to 0 for unlimited. Similar to\n`queue.buffering.max.messages` in the JVM producer.","title":"flush_max_messages","type":"integer"},"max_message_bytes":{"description":"Maximum message bytes the producer will accept to produce.","title":"max_message_bytes","type":"integer"},"required_acks":{"description":"RequiredAcks Number of acknowledgements required to assume that a message has been sent.\nhttps://pkg.go.dev/github.com/Shopify/sarama@v1.30.0#RequiredAcks\nThe options are:\n 0 -\u003e NoResponse. doesn't send any response\n 1 -\u003e WaitForLocal. waits for only the local commit to succeed before responding ( default )\n -1 -\u003e WaitForAll. 
waits for all in-sync replicas to commit before responding.","title":"required_acks","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.SASLConfig":{"additionalProperties":false,"description":"SASLConfig defines the configuration for the SASL authentication.","properties":{"aws_msk":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.AWSMSKConfig","title":"aws_msk"},"mechanism":{"description":"SASL Mechanism to be used, possible values are: (PLAIN, AWS_MSK_IAM, SCRAM-SHA-256 or SCRAM-SHA-512).","title":"mechanism","type":"string"},"password":{"description":"Password to be used on authentication","title":"password","type":"string"},"username":{"description":"Username to be used on authentication","title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for the exporter.","markdownDescription":"# Trace ID/Service-name aware load-balancing exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, logs |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis is an exporter that will consistently export spans and logs depending on the `routing_key` configured. If no `routing_key` is configured, the default routing mechanism is `traceID`. This means that spans belonging to the same `traceID` (or `service.name`, when `service` is used as the `routing_key`) will be sent to the same backend.\n\nIt requires a source of backend information to be provided: static, with a fixed list of backends, or DNS, with a hostname that will resolve to all IP addresses to use. The DNS resolver will periodically check for updates.\n\nNote that either the Trace ID or Service name is used for the decision on which backend to use: the actual backend load isn't taken into consideration. Even though this load-balancer won't do round-robin balancing of the batches, the load distribution should be very similar among backends with a standard deviation under 5% at the current configuration.\n\nThis load balancer is especially useful for backends configured with tail-based samplers or red-metrics-collectors, which make a decision based on the view of the full trace.\n\nWhen a list of backends is updated, around 1/n of the space will be changed, so that the same trace ID might be directed to a different backend, where n is the number of backends. This should be stable enough for most cases, and the higher the number of backends, the less disruption it should cause. Still, if routing stability is important for your use case and your list of backends are constantly changing, consider using the `groupbytrace` processor. This way, traces are dispatched atomically to this exporter, and the same decision about the backend is made for the trace as a whole.\n\nThis also supports service name based exporting for traces. 
If you have two or more collectors that collect traces and then use spanmetrics processor to generate metrics and push to prometheus, there is a high chance of facing label collisions on prometheus if the routing is based on `traceID` because every collector sees the `service+operation` label. With service name based routing, each collector can only see one service name and can push metrics without any label collisions.\n## Configuration\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed examples on using the processor.\n\n* The `otlp` property configures the template used for building the OTLP exporter. Refer to the OTLP Exporter documentation for information on which options are available. Note that the `endpoint` property should not be set and will be overridden by this exporter with the backend endpoint.\n* The `resolver` accepts either a `static` node, or a `dns`. If both are specified, `dns` takes precedence.\n* The `hostname` property inside a `dns` node specifies the hostname to query in order to obtain the list of IP addresses.\n* The `dns` node also accepts the following optional properties:\n * `hostname` DNS hostname to resolve.\n * `port` port to be used for exporting the traces to the IP addresses resolved from `hostname`. If `port` is not specified, the default port 4317 is used.\n * `interval` resolver interval in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `5s` will be used.\n * `timeout` resolver timeout in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `1s` will be used.\n* The `routing_key` property is used to route spans to exporters based on different parameters. This functionality is currently enabled only for `trace` pipeline types. It supports one of the following values:\n * `service`: exports spans based on their service name. This is useful when using processors like the span metrics, so all spans for each service are sent to consistent collector instances for metric collection. Otherwise, metrics for the same services are sent to different collectors, making aggregations inaccurate. 
\n * `traceID` (default): exports spans based on their `traceID`.\n * If not configured, defaults to `traceID` based routing.\n\nSimple example\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n endpoint: localhost:4317\n\nprocessors:\n\nexporters:\n logging:\n loadbalancing:\n routing_key: \"service\"\n protocol:\n otlp:\n # all options from the OTLP exporter are supported\n # except the endpoint\n timeout: 1s\n resolver:\n static:\n hostnames:\n - backend-1:4317\n - backend-2:4317\n - backend-3:4317\n - backend-4:4317\n\nservice:\n pipelines:\n traces:\n receivers:\n - otlp\n processors: []\n exporters:\n - loadbalancing\n logs:\n receivers:\n - otlp\n processors: []\n exporters:\n - loadbalancing\n```\n\nFor testing purposes, the following configuration can be used, where both the load balancer and all backends are running locally:\n```yaml\nreceivers:\n otlp/loadbalancer:\n protocols:\n grpc:\n endpoint: localhost:4317\n otlp/backend-1:\n protocols:\n grpc:\n endpoint: localhost:55690\n otlp/backend-2:\n protocols:\n grpc:\n endpoint: localhost:55700\n otlp/backend-3:\n protocols:\n grpc:\n endpoint: localhost:55710\n otlp/backend-4:\n protocols:\n grpc:\n endpoint: localhost:55720\n\nprocessors:\n\nexporters:\n logging:\n loadbalancing:\n protocol:\n otlp:\n timeout: 1s\n tls:\n insecure: true\n resolver:\n static:\n hostnames:\n - localhost:55690\n - localhost:55700\n - localhost:55710\n - localhost:55720\n\nservice:\n pipelines:\n traces/loadbalancer:\n receivers:\n - otlp/loadbalancer\n processors: []\n exporters:\n - loadbalancing\n\n traces/backend-1:\n receivers:\n - otlp/backend-1\n processors: []\n exporters:\n - logging\n\n traces/backend-2:\n receivers:\n - otlp/backend-2\n processors: []\n exporters:\n - logging\n\n traces/backend-3:\n receivers:\n - otlp/backend-3\n processors: []\n exporters:\n - logging\n\n traces/backend-4:\n receivers:\n - otlp/backend-4\n processors: []\n exporters:\n - logging\n\n logs/loadbalancer:\n receivers:\n - otlp/loadbalancer\n processors: []\n exporters:\n - loadbalancing\n logs/backend-1:\n receivers:\n - otlp/backend-1\n processors: []\n exporters:\n - logging\n logs/backend-2:\n receivers:\n - otlp/backend-2\n processors: []\n exporters:\n - logging\n logs/backend-3:\n receivers:\n - otlp/backend-3\n processors: []\n exporters:\n - logging\n logs/backend-4:\n receivers:\n - otlp/backend-4\n processors: []\n exporters:\n - logging\n```\n\n## Metrics\n\nThe following metrics are recorded by this processor:\n\n* `otelcol_loadbalancer_num_resolutions` represents the total number of resolutions performed by the resolver specified in the tag `resolver`, split by their outcome (`success=true|false`). For the static resolver, this should always be `1` with the tag `success=true`.\n* `otelcol_loadbalancer_num_backends` informs how many backends are currently in use. It should always match the number of items specified in the configuration file in case the `static` resolver is used, and should eventually (seconds) catch up with the DNS changes. Note that DNS caches that might exist between the load balancer and the record authority will influence how long it takes for the load balancer to see the change.\n* `otelcol_loadbalancer_num_backend_updates` records how many of the resolutions resulted in a new list of backends. Use this information to understand how frequent your backend updates are and how often the ring is rebalanced. 
If the DNS hostname is always returning the same list of IP addresses but this metric keeps increasing, it might indicate a bug in the load balancer.\n* `otelcol_loadbalancer_backend_latency` measures the latency for each backend.\n* `otelcol_loadbalancer_backend_outcome` counts what the outcomes were for each endpoint, `success=true|false`.","properties":{"protocol":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.Protocol","title":"protocol"},"resolver":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.ResolverSettings","title":"resolver"},"routing_key":{"title":"routing_key","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.DNSResolver":{"additionalProperties":false,"description":"DNSResolver defines the configuration for the DNS resolver","properties":{"hostname":{"title":"hostname","type":"string"},"interval":{"title":"interval","type":"string"},"port":{"title":"port","type":"string"},"timeout":{"title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.Protocol":{"additionalProperties":false,"description":"Protocol holds the individual protocol-specific settings.","properties":{"otlp":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.otlpexporter.Config","title":"otlp"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.ResolverSettings":{"additionalProperties":false,"description":"ResolverSettings defines the configurations for the backend resolver","properties":{"dns":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.DNSResolver","title":"dns"},"static":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.StaticResolver","title":"static"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.StaticResolver":{"additionalProperties":false,"description":"StaticResolver defines the configuration for the resolver providing a fixed list of backends","properties":{"hostnames":{"items":{"type":"string"},"title":"hostnames","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.logicmonitorexporter.APIToken":{"additionalProperties":false,"properties":{"access_id":{"title":"access_id","type":"string"},"access_key":{"title":"access_key","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.logicmonitorexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for LogicMonitor exporter.","markdownDescription":"# LogicMonitor Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, logs |\n| Distributions | [contrib] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending traces \u0026 logs to [Logicmonitor](https://www.logicmonitor.com/).\n\n## Configuration Options\nThe following configuration options are supported:\n\n`endpoint (required)`: The target base URL to send data to (e.g.: 
https://\u003ccompany_name\u003e.logicmonitor.com/rest). For logs, the \"/log/ingest\" path will be appended by default.\\\n`api_token`: API Token of LogicMonitor\n\n## Prerequisite\nThe following environment variable must be provided:\n\n| Key | Value |\n| ------ | ------ |\n| LOGICMONITOR_ACCOUNT | Company name |\n\n**NOTE**: To ingest data into LogicMonitor, either an API token or a bearer token is required.\n\n## Example\n##### Ingestion through API Token\nPass `access_id` and `access_key` through config.yaml as shown in the example ***OR*** \nSet the environment variables `LOGICMONITOR_ACCESS_ID` and `LOGICMONITOR_ACCESS_KEY`\n```yaml\n exporters:\n logicmonitor:\n endpoint: \"https://\u003ccompany_name\u003e.logicmonitor.com/rest\"\n api_token:\n access_id: \"\u003caccess_id of logicmonitor\u003e\"\n access_key: \"\u003caccess_key of logicmonitor\u003e\"\n```\n##### Ingestion through Bearer token\n\nPass the bearer token as an Authorization header through config.yaml as shown in the example ***OR***\nSet the environment variable `LOGICMONITOR_BEARER_TOKEN`\n```yaml\n exporters:\n logicmonitor:\n endpoint: \"https://\u003ccompany_name\u003e.logicmonitor.com/rest\"\n headers:\n Authorization: Bearer \u003cbearer token of logicmonitor\u003e\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"api_token":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.logicmonitorexporter.APIToken","description":"ApiToken of Logicmonitor Platform","title":"api_token"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_to_telemetry_conversion":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.resourcetotelemetry.Settings","title":"resource_to_telemetry_conversion"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.logzioexporter.Config":{"additionalProperties":false,"description":"Config contains Logz.io specific configuration such as Account TracesToken, Region, etc.","markdownDescription":"# Logzio Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, logs |\n| Distributions | [contrib], [aws], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending trace and log data to [Logz.io](https://www.logz.io)\n\n### The following configuration options are supported:\nLogz.io exporter is utilizing opentelemetry [exporter helper](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) for `retry_on_failure`,`sending_queue` and `timeout` settings\n- `account_token` (Required): Your logz.io account token for your tracing or logs account.\n- `region` Your logz.io account [region code](https://docs.logz.io/user-guide/accounts/account-region.html#available-regions). Defaults to `us`. Required only if your logz.io region is different than US.\n- `endpoint` Custom endpoint, mostly used for dev or testing. 
This will override the region parameter.\n- `retry_on_failure` \n - `enabled` (default = true)\n - `initial_interval`: Time to wait after the first failure before retrying; ignored if `enabled` is `false` (default = 5s)\n - `max_interval`: Is the upper bound on backoff; ignored if `enabled` is `false` (default = 30s)\n - `max_elapsed_time`: Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false` (default = 300s)\n- `sending_queue`\n - `enabled` (default = true)\n - `num_consumers`: Number of consumers that dequeue batches; ignored if `enabled` is `false` (default = 10)\n - `queue_size`: Maximum number of batches kept in memory before dropping; ignored if `enabled` is `false`\n User should calculate this as `num_seconds * requests_per_second` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per seconds.\n - default = 1000\n- `timeout`: Time to wait per individual attempt to send data to a backend. default = 30s\n\n#### Tracing example:\n* We recommend using `batch` processor. Batching helps better compress the data and reduce the number of outgoing connections required to transmit the data.\n\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n endpoint: \"0.0.0.0:4317\"\n http:\n endpoint: \"0.0.0.0:4318\"\n jaeger:\n protocols:\n thrift_compact:\n endpoint: \"0.0.0.0:6831\"\n thrift_binary:\n endpoint: \"0.0.0.0:6832\"\n grpc:\n endpoint: \"0.0.0.0:14250\"\n thrift_http:\n endpoint: \"0.0.0.0:14268\"\nprocessors:\n batch:\n send_batch_size: 10000\n timeout: 1s\nexporters:\n logzio/traces:\n account_token: \"LOGZIOtraceTOKEN\"\n region: \"us\"\nservice:\n pipelines:\n traces:\n receivers: [ otlp,jaeger ]\n processors: [ batch ]\n exporters: [ logzio/traces ]\n telemetry:\n logs:\n level: \"debug\"\n```\n#### Logs example:\n* We recommend using `batch` processor. Batching helps better compress the data and reduce the number of outgoing connections required to transmit the data.\n* We recommend adding `type` attribute to classify your log records\n* We recommend adding `resourcedetection` processor to add metadata to your log records\n\n```yaml\nreceivers:\n filelog:\n include: [ \"/private/var/log/*.log\" ] # MacOs system logs\n include_file_name: false\n include_file_path: true \n operators:\n - type: move\n from: attributes[\"log.file.path\"]\n to: attributes[\"log_file_path\"]\n attributes:\n type: \u003c\u003cyour-logzio-type\u003e\u003e\nprocessors:\n batch:\n send_batch_size: 10000\n timeout: 1s\n resourcedetection/system:\n detectors: [ \"system\" ]\n system:\n hostname_sources: [ \"os\" ]\nexporters:\n logzio/logs:\n account_token: \"LOGZIOlogsTOKEN\"\n region: \"us\"\nservice:\n pipelines:\n logs:\n receivers: [filelog]\n processors: [ resourcedetection/system, batch ]\n exporters: [logzio/logs]\n telemetry:\n logs:\n level: \"debug\"\n```\n#### Metrics:\nIn order to use the Prometheus backend you must use the standard prometheusremotewrite exporter as well. The following [regions](https://docs.logz.io/user-guide/accounts/account-region.html#supported-regions-for-prometheus-metrics) are supported and configured as follows. 
Use the Logz.io Listener URL for your region, configured to use port 8052 for http traffic, or port 8053 for https traffic.\nExample:\n```yaml\nexporters:\n prometheusremotewrite:\n endpoint: \"https://listener.logz.io:8053\"\n headers:\n Authorization: \"Bearer LOGZIOprometheusTOKEN\"\n```\n\nPutting these together, it would look like this in a full configuration:\n\n```yaml\nreceivers:\n jaeger:\n protocols:\n thrift_http:\n endpoint: \"0.0.0.0:14278\"\n\n prometheus:\n config:\n scrape_configs:\n - job_name: 'ratelimiter'\n scrape_interval: 15s\n static_configs:\n - targets: [ \"0.0.0.0:8889\" ]\n\nexporters:\n logzio/traces:\n account_token: \"LOGZIOtraceTOKEN\"\n region: \"us\"\n\n prometheusremotewrite:\n endpoint: \"https://listener.logz.io:8053\"\n headers:\n Authorization: \"Bearer LOGZIOprometheusTOKEN\"\n\nprocessors:\n batch:\n send_batch_size: 10000\n timeout: 1s\n \nservice:\n pipelines:\n traces:\n receivers: [jaeger]\n processors: [batch]\n exporters: [logzio/traces]\n\n metrics:\n receivers: [prometheus]\n exporters: [prometheusremotewrite]\n \n telemetry:\n logs:\n level: debug #activate debug mode\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"account_token":{"title":"account_token","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"custom_endpoint":{"title":"custom_endpoint","type":"string"},"drain_interval":{"title":"drain_interval","type":"integer"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"queue_capacity":{"title":"queue_capacity","type":"integer"},"queue_max_length":{"title":"queue_max_length","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP 
client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"region":{"title":"region","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.lokiexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Loki exporter.","markdownDescription":"# Loki Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: logs |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\nExports data via HTTP to [Loki](https://grafana.com/docs/loki/latest/).\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): The target URL to send Loki log streams to (e.g.: `http://loki:3100/loki/api/v1/push`).\n\nExample:\n```yaml\nexporters:\n loki:\n endpoint: https://loki.example.com:3100/loki/api/v1/push\n```\n\n## Configuration via attribute hints\n\n### Labels\nThe Loki exporter can convert OTLP resource and log attributes into Loki labels, which are indexed. For that, you need to configure\nhints, specifying which attributes should be placed as labels. The hints are themselves attributes and will be ignored when\nexporting to Loki. The following example uses the `attributes` processor to hint the Loki exporter to set the `event.domain`\nattribute as label and the `resource` processor to give a hint to the Loki exporter to set the `service.name` as label.\n\n```yaml\nprocessors:\n attributes:\n actions:\n - action: insert\n key: loki.attribute.labels\n value: event.domain\n\n resource:\n attributes:\n - action: insert\n key: loki.resource.labels\n value: service.name\n```\n\nCurrently, Loki does not support label names with dots. 
\nThat's why lokiexporter normalizes label names to follow the Prometheus label name standard before sending requests to Loki.\nMore information on label normalization can be found [here](../../pkg/translator/prometheus/README.md#Labels)\n\nThe promotion of multiple resource and log attributes to labels is done with a single action with comma-separated desired labels:\n```yaml\nprocessors:\n attributes:\n actions:\n - action: insert\n key: loki.attribute.labels\n value: event.domain, event.name\n\n resource:\n attributes:\n - action: insert\n key: loki.resource.labels\n value: service.name, service.namespace\n```\n\nDefault labels:\n- `job=service.namespace/service.name`\n- `instance=service.instance.id`\n- `exporter=OTLP`\n\n`exporter=OTLP` is always set.\n\nIf `service.name` and `service.namespace` are present then `job=service.namespace/service.name` is set\n\nIf `service.name` is present and `service.namespace` is not present then `job=service.name` is set\n\nIf `service.name` is not present and `service.namespace` is present then `job` label is not set\n\nIf `service.instance.id` is present then `instance=service.instance.id` is set\n\nIf `service.instance.id` is not present then `instance` label is not set\n\nThe full list of settings exposed for this exporter is documented [here](./config.go) with detailed sample\nconfigurations [here](./testdata/config.yaml).\n\nMore information on how to send logs to Grafana Loki using the OpenTelemetry Collector can be found [here](https://grafana.com/docs/opentelemetry/collector/send-logs-to-loki/)\n\n### Tenant information\n\nIt is recommended to use the [`header_setter`](../../extension/headerssetterextension/README.md) extension to configure the tenant information to send to Loki. In case a static tenant\nshould be used, you can make use of the `headers` option for regular HTTP client settings, like the following:\n\n```yaml\nexporters:\n loki:\n endpoint: http://localhost:3100/loki/api/v1/push\n headers:\n \"X-Scope-OrgID\": acme\n```\n\nIt is also possible to provide the `loki.tenant` attribute hint that specifies\nwhich resource or log attribute value should be used as a tenant. For example:\n\n```yaml\nprocessors:\n resource:\n attributes:\n - action: insert\n key: loki.tenant\n value: host.name\n```\n\nIn this case the value of the `host.name` resource attribute is used to group logs\nby tenant and send requests with the `X-Scope-OrgID` header set to relevant tenants.\n\nIf the `loki.tenant` hint attribute is present in both resource and log attributes,\nthen the look-up for a tenant value from resource attributes takes precedence.\n\n### Format\nTo choose the format the exporter uses for writing log lines, use the `loki.format` hint. For example:\n\n```yaml\nprocessors:\n resource:\n attributes:\n - action: insert\n key: loki.format\n value: logfmt\n```\n\nThe following formats are supported:\n\n- `logfmt`: Write logs as [logfmt](https://brandur.org/logfmt) lines.\n- `json`: Write logs as JSON objects. It is the default format if no hint is present.\n- `raw`: Write the body of the log message as a string representation.\n\n## Severity\n\nOpenTelemetry uses `record.severity` to track log levels, whereas Loki uses `record.attributes.level` for the same purpose. 
The exporter automatically maps the two, except if a \"level\" attribute already exists.\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md)\n- [Queuing and retry settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.mezmoexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Mezmo exporter.","markdownDescription":"# Mezmo Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: logs |\n| Distributions | [contrib] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending OpenTelemetry log data to\n[Mezmo](https://mezmo.com).\n\nNote: Mezmo logs ingestion [requires a `hostname`](https://docs.mezmo.com/docs/log-parsing#hostname)\nfield to be present. When logs are sent via this exporter, and `hostname`\nmetadata is not added, the Mezmo ingestion API will set `hostname=otel`. To\nprovide the `hostname` information, we recommend adding a\n[Resource Detection Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor)\nto the collector configuration. Doing so will cause this exporter to\nautomatically add the `hostname` metadata to the outgoing log data whenever\nit is available. See the below example configuration for a basic configuration\nthat adds `hostname` detection support.\n\n# Configuration options:\n\n- `ingest_url` (optional): Specifies the URL to send ingested logs to. If not \nspecified, will default to `https://logs.mezmo.com/otel/ingest/rest`.\n- `ingest_key` (required): Ingestion key used to send log data to Mezmo. 
See\n[Ingestion Keys](https://docs.mezmo.com/docs/ingestion-key) for more details.\n\n# Example:\n## Simple Log Data\n\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n endpoint: \":4317\"\n\nprocessors:\n resourcedetection:\n detectors:\n - system\n system:\n hostname_sources:\n - os\n\nexporters:\n mezmo:\n ingest_url: \"https://logs.mezmo.com/otel/ingest/rest\"\n ingest_key: \"00000000000000000000000000000000\"\n\nservice:\n pipelines:\n logs:\n receivers: [ otlp ]\n processors: [ resourcedetection ]\n exporters: [ mezmo ]\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"ingest_key":{"description":"Token is the authentication token provided by Mezmo.","title":"ingest_key","type":"string"},"ingest_url":{"description":"IngestURL is the URL to send telemetry to.","title":"ingest_url","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.opencensusexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for OpenCensus exporter.","markdownDescription":"# OpenCensus gRPC Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics |\n| Distributions | [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nExports traces and/or metrics via gRPC using\n[OpenCensus](https://opencensus.io/) format.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): host:port to which the exporter is going to send OpenCensus trace and metric data,\nusing the gRPC protocol. The valid syntax is described\n[here](https://github.com/grpc/grpc/blob/master/doc/naming.md)\n\nBy default, TLS is enabled and must be configured under `tls:`:\n\n- `insecure` (default = `false`): whether to enable client transport security for\n the exporter's connection.\n\nAs a result, the following parameters are also required under `tls:`:\n\n- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n- `key_file` (no default): path to the TLS key to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n\nExample:\n\n```yaml\nexporters:\n opencensus:\n endpoint: opencensus2:55678\n tls:\n cert_file: file.cert\n key_file: file.key\n opencensus/2:\n endpoint: opencensus2:55678\n tls:\n insecure: true\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. 
Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"num_workers":{"description":"The number of workers that send the gRPC requests.","title":"num_workers","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC client. 
See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.parquetexporter.Config":{"additionalProperties":false,"markdownDescription":"# Parquet File Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: traces, metrics, logs |\n| Distributions | [contrib] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nSends pipeline data to Parquet files.\n\n## Configuration\n\nThe following configuration options are required:\n\n- `path` (no default): Export Parquet file path.\n\nThe following configuration options can also be configured:\n\nTODO\n\nExample:\n\n```yaml\nexporters:\n parquet:\n path: /var/output/log.parquet\n```\n\nThe full list of settings exposed for this exporter is going to be documented later\nwith detailed sample configurations [here](testdata/config.yaml).","properties":{"path":{"title":"path","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Prometheus exporter.","markdownDescription":"# Prometheus Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [core], [contrib], [aws], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nExports data in the [Prometheus format](https://prometheus.io/docs/concepts/data_model/), which allows it to be scraped by a [Prometheus](https://prometheus.io/) server.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): the address on which metrics will be exposed, using path `/metrics`. For full list of `HTTPServerSettings` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp).\n\nThe following settings can be optionally configured:\n\n- `const_labels` (no default): key/values that are applied for every exported metric.\n- `namespace` (no default): if set, exports metrics under the provided value.\n- `send_timestamps` (default = `false`): if true, sends the timestamp of the underlying metric sample in the response.\n- `metric_expiration` (default = `5m`): defines how long metrics are exposed without updates\n- `resource_to_telemetry_conversion`\n - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default.\n- `enable_open_metrics`: (default = `false`): If true, metrics will be exported using the OpenMetrics format. 
Exemplars are only exported in the OpenMetrics format, and only for histogram and monotonic sum (i.e. counter) metrics.\n\nExample:\n\n```yaml\nexporters:\n prometheus:\n endpoint: \"1.2.3.4:1234\"\n tls:\n ca_file: \"/path/to/ca.pem\"\n cert_file: \"/path/to/cert.pem\"\n key_file: \"/path/to/key.pem\"\n namespace: test-space\n const_labels:\n label1: value1\n \"another label\": spaced value\n send_timestamps: true\n metric_expiration: 180m\n enable_open_metrics: true\n resource_to_telemetry_conversion:\n enabled: true\n```\n\nGiven the example, metrics will be available at `https://1.2.3.4:1234/metrics`.\n\n## Metric names and labels normalization\n\nOpenTelemetry metric names and attributes are normalized to be compliant with Prometheus naming rules. [Details on this normalization process are described in the Prometheus translator module](../../pkg/translator/prometheus/).","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"const_labels":{"$ref":"#/$defs/github.com.prometheus.client_golang.prometheus.Labels","description":"ConstLabels are values that are applied for every exported metric.","title":"const_labels"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"enable_open_metrics":{"description":"EnableOpenMetrics enables the use of the OpenMetrics encoding option for the prometheus exporter.","title":"enable_open_metrics","type":"boolean"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"metric_expiration":{"description":"MetricExpiration defines how long metrics are kept without updates","title":"metric_expiration","type":"string"},"namespace":{"description":"Namespace if set, exports metrics under the provided value.","title":"namespace","type":"string"},"resource_to_telemetry_conversion":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.resourcetotelemetry.Settings","description":"ResourceToTelemetrySettings defines configuration for converting resource attributes to metric labels.","title":"resource_to_telemetry_conversion"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"send_timestamps":{"description":"SendTimestamps will send the underlying scrape timestamp with the export","title":"send_timestamps","type":"boolean"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Remote Write 
exporter.","markdownDescription":"# Prometheus Remote Write Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [core], [contrib], [aws], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nPrometheus Remote Write Exporter sends OpenTelemetry metrics\nto Prometheus [remote write compatible\nbackends](https://prometheus.io/docs/operating/integrations/)\nsuch as Cortex, Mimir, and Thanos.\nBy default, this exporter requires TLS and offers queued retry capabilities.\n\n:warning: Non-cumulative monotonic, histogram, and summary OTLP metrics are\ndropped by this exporter.\n\nA [design doc](DESIGN.md) is available to document in detail\nhow this exporter works.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): The remote write URL to send remote write samples.\n\nBy default, TLS is enabled and must be configured under `tls:`:\n\n- `insecure` (default = `false`): whether to enable client transport security for\n the exporter's connection.\n\nAs a result, the following parameters are also required under `tls:`:\n\n- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n- `key_file` (no default): path to the TLS key to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n\nThe following settings can be optionally configured:\n\n- `external_labels`: map of labels names and values to be attached to each metric data point\n- `headers`: additional headers attached to each HTTP request.\n - *Note the following headers cannot be changed: `Content-Encoding`, `Content-Type`, `X-Prometheus-Remote-Write-Version`, and `User-Agent`.*\n- `namespace`: prefix attached to each exported metric name.\n- `remote_write_queue`: fine tuning for queueing and sending of the outgoing remote writes.\n - `enabled`: enable the sending queue\n - `queue_size`: number of OTLP metrics that can be queued. 
Ignored if `enabled` is `false`\n - `num_consumers`: minimum number of workers to use to fan out the outgoing requests.\n- `resource_to_telemetry_conversion`\n - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default.\n- `target_info`: customize `target_info` metric\n - `enabled` (default = true): If `enabled` is `true`, a `target_info` metric will be generated for each resource metric (see https://github.com/open-telemetry/opentelemetry-specification/pull/2381).\n- `export_created_metric`:\n - `enabled` (default = false): If `enabled` is `true`, a `_created` metric is\n exported for Summary, Histogram, and Monotonic Sum metric points if\n `StartTimeUnixNano` is set.\n\nExample:\n\n```yaml\nexporters:\n prometheusremotewrite:\n endpoint: \"https://my-cortex:7900/api/v1/push\"\n wal: # Enabling the Write-Ahead-Log for the exporter.\n directory: ./prom_rw # The directory to store the WAL in\n buffer_size: 100 # Optional count of elements to be read from the WAL before truncating; default of 300\n truncate_frequency: 45s # Optional frequency for how often the WAL should be truncated. It is a time.ParseDuration; default of 1m\n resource_to_telemetry_conversion:\n enabled: true # Convert resource attributes to metric labels\n```\n\nExample:\n\n```yaml\nexporters:\n prometheusremotewrite:\n endpoint: \"https://my-cortex:7900/api/v1/push\"\n external_labels:\n label_name1: label_value1\n label_name2: label_value2\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`.\n\n## Metric names and labels normalization\n\nOpenTelemetry metric names and attributes are normalized to be compliant with Prometheus naming rules. 
[Details on this normalization process are described in the Prometheus translator module](../../pkg/translator/prometheus/).\n\n[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"export_created_metric":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.CreatedMetric","description":"CreatedMetric allows customizing creation of _created metrics","title":"export_created_metric"},"external_labels":{"description":"ExternalLabels defines a map of label keys and values that are allowed to start with reserved prefix \"__\"","patternProperties":{".*":{"type":"string"}},"title":"external_labels","type":"object"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"namespace":{"description":"prefix attached to each exported metric name\nSee: https://prometheus.io/docs/practices/naming/#metric-names","title":"namespace","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"remote_write_queue":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.RemoteWriteQueue","description":"QueueConfig allows users to fine tune the queues\nthat handle outgoing requests.","title":"remote_write_queue"},"resource_to_telemetry_conversion":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.resourcetotelemetry.Settings","description":"ResourceToTelemetrySettings is the option for converting resource attributes to telemetry attributes.\n\"Enabled\" - A boolean field to enable/disable this option. Default is `false`.\nIf enabled, all the resource attributes will be converted to metric labels by default.","title":"resource_to_telemetry_conversion"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"target_info":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.TargetInfo","description":"TargetInfo allows customizing the target_info metric","title":"target_info"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wal":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.WALConfig","title":"wal"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.CreatedMetric":{"additionalProperties":false,"properties":{"enabled":{"description":"Enabled if true the _created metrics could be exported","title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.RemoteWriteQueue":{"additionalProperties":false,"description":"RemoteWriteQueue allows to configure the remote write queue.","properties":{"enabled":{"description":"Enabled if false the queue is not enabled, the export requests\nare executed synchronously.","title":"enabled","type":"boolean"},"num_consumers":{"description":"NumWorkers configures the number of workers used by\nthe collector to fan out remote write requests.","title":"num_consumers","type":"integer"},"queue_size":{"description":"QueueSize is the maximum number of OTLP metric batches allowed\nin the queue at a given time. 
Ignored if Enabled is false.","title":"queue_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.TargetInfo":{"additionalProperties":false,"properties":{"enabled":{"description":"Enabled if false the target_info metric is not generated by the exporter","title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.WALConfig":{"additionalProperties":false,"markdownDescription":"# Prometheus Remote Write Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [core], [contrib], [aws], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nPrometheus Remote Write Exporter sends OpenTelemetry metrics\nto Prometheus [remote write compatible\nbackends](https://prometheus.io/docs/operating/integrations/)\nsuch as Cortex, Mimir, and Thanos.\nBy default, this exporter requires TLS and offers queued retry capabilities.\n\n:warning: Non-cumulative monotonic, histogram, and summary OTLP metrics are\ndropped by this exporter.\n\nA [design doc](DESIGN.md) is available to document in detail\nhow this exporter works.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): The remote write URL to send remote write samples.\n\nBy default, TLS is enabled and must be configured under `tls:`:\n\n- `insecure` (default = `false`): whether to enable client transport security for\n the exporter's connection.\n\nAs a result, the following parameters are also required under `tls:`:\n\n- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n- `key_file` (no default): path to the TLS key to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n\nThe following settings can be optionally configured:\n\n- `external_labels`: map of labels names and values to be attached to each metric data point\n- `headers`: additional headers attached to each HTTP request.\n - *Note the following headers cannot be changed: `Content-Encoding`, `Content-Type`, `X-Prometheus-Remote-Write-Version`, and `User-Agent`.*\n- `namespace`: prefix attached to each exported metric name.\n- `remote_write_queue`: fine tuning for queueing and sending of the outgoing remote writes.\n - `enabled`: enable the sending queue\n - `queue_size`: number of OTLP metrics that can be queued. 
Ignored if `enabled` is `false`\n - `num_consumers`: minimum number of workers to use to fan out the outgoing requests.\n- `resource_to_telemetry_conversion`\n - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default.\n- `target_info`: customize `target_info` metric\n - `enabled` (default = true): If `enabled` is `true`, a `target_info` metric will be generated for each resource metric (see https://github.com/open-telemetry/opentelemetry-specification/pull/2381).\n- `export_created_metric`:\n - `enabled` (default = false): If `enabled` is `true`, a `_created` metric is\n exported for Summary, Histogram, and Monotonic Sum metric points if\n `StartTimeUnixNano` is set.\n\nExample:\n\n```yaml\nexporters:\n prometheusremotewrite:\n endpoint: \"https://my-cortex:7900/api/v1/push\"\n wal: # Enabling the Write-Ahead-Log for the exporter.\n directory: ./prom_rw # The directory to store the WAL in\n buffer_size: 100 # Optional count of elements to be read from the WAL before truncating; default of 300\n truncate_frequency: 45s # Optional frequency for how often the WAL should be truncated. It is a time.ParseDuration; default of 1m\n resource_to_telemetry_conversion:\n enabled: true # Convert resource attributes to metric labels\n```\n\nExample:\n\n```yaml\nexporters:\n prometheusremotewrite:\n endpoint: \"https://my-cortex:7900/api/v1/push\"\n external_labels:\n label_name1: label_value1\n label_name2: label_value2\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`.\n\n## Metric names and labels normalization\n\nOpenTelemetry metric names and attributes are normalized to be compliant with Prometheus naming rules. 
[Details on this normalization process are described in the Prometheus translator module](../../pkg/translator/prometheus/).\n\n[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol","properties":{"buffer_size":{"title":"buffer_size","type":"integer"},"directory":{"title":"directory","type":"string"},"truncate_frequency":{"title":"truncate_frequency","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Athenz":{"additionalProperties":false,"properties":{"key_id":{"title":"key_id","type":"string"},"principal_header":{"title":"principal_header","type":"string"},"private_key":{"title":"private_key","type":"string"},"provider_domain":{"title":"provider_domain","type":"string"},"tenant_domain":{"title":"tenant_domain","type":"string"},"tenant_service":{"title":"tenant_service","type":"string"},"zts_url":{"title":"zts_url","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Authentication":{"additionalProperties":false,"properties":{"athenz":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Athenz","title":"athenz"},"oauth2":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.OAuth2","title":"oauth2"},"tls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.TLS","title":"tls"},"token":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Token","title":"token"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Pulsar exporter.","properties":{"auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Authentication","title":"auth"},"connection_timeout":{"title":"connection_timeout","type":"string"},"encoding":{"description":"Encoding of messages (default \"otlp_proto\")","title":"encoding","type":"string"},"endpoint":{"description":"Endpoint of pulsar broker (default \"pulsar://localhost:6650\")","title":"endpoint","type":"string"},"map_connections_per_broker":{"title":"map_connections_per_broker","type":"integer"},"operation_timeout":{"title":"operation_timeout","type":"string"},"producer":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Producer","description":"Producer configuration of the Pulsar producer","title":"producer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"tls_allow_insecure_connection":{"description":"Configure whether the Pulsar client accept untrusted TLS certificate from broker (default: false)","title":"tls_allow_insecure_connection","type":"boolean"},"tls_trust_certs_file_path":{"description":"Set the path to the trusted TLS certificate 
file","title":"tls_trust_certs_file_path","type":"string"},"topic":{"description":"The name of the pulsar topic to export to (default otlp_spans for traces, otlp_metrics for metrics)","title":"topic","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.OAuth2":{"additionalProperties":false,"properties":{"audience":{"title":"audience","type":"string"},"client_id":{"title":"client_id","type":"string"},"issuer_url":{"title":"issuer_url","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Producer":{"additionalProperties":false,"description":"Producer defines configuration for producer","properties":{"batch_builder_type":{"title":"batch_builder_type","type":"string"},"batching_max_messages":{"title":"batching_max_messages","type":"integer"},"batching_max_publish_delay":{"title":"batching_max_publish_delay","type":"string"},"batching_max_size":{"title":"batching_max_size","type":"integer"},"compression_level":{"title":"compression_level","type":"string"},"compression_type":{"title":"compression_type","type":"string"},"disable_batching":{"title":"disable_batching","type":"boolean"},"disable_block_if_queue_full":{"title":"disable_block_if_queue_full","type":"boolean"},"hashing_scheme":{"title":"hashing_scheme","type":"string"},"max_pending_messages":{"title":"max_pending_messages","type":"integer"},"max_reconnect_broker":{"title":"max_reconnect_broker","type":"integer"},"partitions_auto_discovery_interval":{"title":"partitions_auto_discovery_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.TLS":{"additionalProperties":false,"properties":{"cert_file":{"title":"cert_file","type":"string"},"key_file":{"title":"key_file","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Token":{"additionalProperties":false,"properties":{"token":{"title":"token","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.sapmexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for SAPM exporter.","markdownDescription":"# SAPM Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [aws], [observiq], [splunk] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe SAPM exporter builds on the Jaeger proto and adds additional batching on top. This allows\nthe collector to export traces from multiples nodes/services in a single batch. 
The SAPM proto\nand some useful related utilities can be found [here](https://github.com/signalfx/sapm-proto/).\n\n\u003e Please review the Collector's [security\n\u003e documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md),\n\u003e which contains recommendations on securing sensitive information such as the\n\u003e API key required by this exporter.\n\n## Configuration\n\nThe following configuration options are required:\n\n- `access_token` (no default): AccessToken is the authentication token provided by SignalFx or\nanother backend that supports the SAPM proto. The SignalFx access token can be obtained from the\nweb app. For details on how to do so please refer the documentation [here](https://docs.signalfx.com/en/latest/admin-guide/tokens.html#access-tokens).\n- `endpoint` (no default): This is the destination to where traces will be sent to in SAPM\nformat. It must be a full URL and include the scheme, port and path e.g,\n\u003c!-- markdown-link-check-disable-line --\u003ehttps://ingest.us0.signalfx.com/v2/trace. This can be pointed to the SignalFx \nbackend or to another Otel collector that has the SAPM receiver enabled.\n\nThe following configuration options can also be configured:\n\n- `max_connections` (default = 100): MaxConnections is used to set a limit to the maximum\nidle HTTP connection the exporter can keep open.\n- `num_workers` (default = 8): NumWorkers is the number of workers that should be used to\nexport traces. Exporter can make as many requests in parallel as the number of workers. Note\nthat this will likely be removed in future in favour of processors handling parallel exporting.\n- `access_token_passthrough`: (default = `true`) Whether to use `\"com.splunk.signalfx.access_token\"`\ntrace resource attribute, if any, as SFx access token. In either case this attribute will be deleted\nduring final translation. Intended to be used in tandem with identical configuration option for\n[SAPM receiver](../../receiver/sapmreceiver/README.md) to preserve trace origin.\n- `timeout` (default = 5s): Is the timeout for every attempt to send data to the backend.\n- `log_detailed_response` (default = `false`): Option to log detailed response from Splunk APM.\nIn addition to setting this option to `true`, debug logging at the Collector level needs to be enabled.\n- `compression`: Compression method to use for outgoing SAPM requests. Can be one of\n \"gzip\", \"zstd\" or be unspecified. 
If unspecified then \"gzip\" compression is used unless\n `disable_compression` option is set to true.\n- `disable_compression` (default = `false`): If set to true the outgoing requests are not\n compressed and `compression` option is ignored.\n\nIn addition, this exporter offers queued retry which is enabled by default.\nInformation about queued retry configuration parameters can be found\n[here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md).\n\nExample:\n\n```yaml\nexporters:\n sapm:\n access_token: YOUR_ACCESS_TOKEN\n access_token_passthrough: true\n endpoint: https://ingest.YOUR_SIGNALFX_REALM.signalfx.com/v2/trace\n max_connections: 100\n num_workers: 8\n log_detailed_response: true\n```\n\nThe full list of settings exposed for this exporter are documented [here](config.go)\nwith detailed sample configurations [here](testdata/config.yaml).\n\nThis exporter also offers proxy support as documented\n[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support).","properties":{"access_token":{"description":"AccessToken is the authentication token provided by SignalFx.","title":"access_token","type":"string"},"access_token_passthrough":{"description":"AccessTokenPassthrough indicates whether to associate datapoints with an organization access token received in request.","title":"access_token_passthrough","type":"boolean"},"compression":{"description":"Compression method to use (gzip or zstd). Ignored if DisableCompression=true.\nIf unspecified defaults to gzip.","title":"compression","type":"string"},"disable_compression":{"description":"Disable compression. If set to true then Compression field is ignored.","title":"disable_compression","type":"boolean"},"endpoint":{"description":"Endpoint is the destination to where traces will be sent to in SAPM format.\nIt must be a full URL and include the scheme, port and path e.g, https://ingest.signalfx.com/v2/trace","title":"endpoint","type":"string"},"log_detailed_response":{"description":"Log detailed response from trace ingest.","title":"log_detailed_response","type":"boolean"},"max_connections":{"description":"MaxConnections is used to set a limit to the maximum idle HTTP connection the exporter can keep open.","title":"max_connections","type":"integer"},"num_workers":{"description":"NumWorkers is the number of workers that should be used to export traces.\nExporter can make as many requests in parallel as the number of workers. Defaults to 8.","title":"num_workers","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.sentryexporter.Config":{"additionalProperties":false,"description":"Config defines the configuration for the Sentry Exporter.","properties":{"dsn":{"description":"DSN to report transaction to Sentry. If the DSN is not set, no trace will be sent to Sentry.","title":"dsn","type":"string"},"environment":{"description":"The deployment environment name, such as production or staging.\nEnvironments are case sensitive. 
The environment name can't contain newlines, spaces or forward slashes,\ncan't be the string \"None\", or exceed 64 characters.","title":"environment","type":"string"},"insecure_skip_verify":{"description":"InsecureSkipVerify controls whether the client verifies the Sentry server certificate chain","title":"insecure_skip_verify","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for SignalFx exporter.","markdownDescription":"# SignalFx Metrics Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [aws], [observiq], [splunk] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter can be used to send metrics, events, and trace correlation to SignalFx.\n\nApart from metrics, the exporter is also capable of sending metric metadata\n(properties and tags) to SignalFx. Currently, only metric metadata updates from\nthe [k8s_cluster receiver](../../receiver/k8sclusterreceiver/README.md) are\nsupported.\n\n## Metrics Configuration\n\nThe following configuration options are required:\n\n- `access_token` (no default): The access token is the authentication token\n provided by SignalFx. The SignalFx access token can be obtained from the\n web app. For details on how to do so please refer the documentation [here](https://docs.signalfx.com/en/latest/admin-guide/tokens.html#access-tokens).\n- Either `realm` or both `api_url` and `ingest_url`. Both `api_url` and\n `ingest_url` take precedence over `realm`.\n - `realm` (no default): SignalFx realm where the data will be received.\n - `api_url` (no default): Destination to which SignalFx [properties and\n tags](https://docs.signalfx.com/en/latest/metrics-metadata/metrics-metadata.html#metrics-metadata)\n are sent. If `realm` is set, this option is derived and will be\n `https://api.{realm}.signalfx.com`. If a value is explicitly set, the\n value of `realm` will not be used in determining `api_url`. The explicit\n value will be used instead.\n - `ingest_url` (no default): Destination where SignalFx metrics are sent. If\n `realm` is set, this option is derived and will be\n `https://ingest.{realm}.signalfx.com`. If a value is\n explicitly set, the value of `realm` will not be used in determining\n `ingest_url`. The explicit value will be used instead. The exporter will \n automatically append the appropriate path: \"/v2/datapoint\" for metrics, \n and \"/v2/event\" for events.\n\nThe following configuration options can also be configured:\n\n- `access_token_passthrough`: (default = `true`) Whether to use\n `\"com.splunk.signalfx.access_token\"` metric resource attribute, if any, as the\n SignalFx access token. In either case this attribute will be dropped during\n final translation, in this exporter only. 
Intended to be used in tandem with\n identical configuration option for [SignalFx\n receiver](../../receiver/signalfxreceiver/README.md) to preserve datapoint\n origin for only this exporter, as others will reveal the organization access token\n by not filtering the attribute.\n- `exclude_metrics`: List of metric filters that will determine metrics to be\n excluded from sending to the SignalFx backend. The filtering is applied after the default \n translations controlled by `disable_default_translation_rules` option.\n See [here](./testdata/config.yaml) for examples. Apart from the values explicitly\n provided via this option, by default, [these](./internal/translation/default_metrics.go) are\n also appended to this list. Setting this option to `[]` will override all the default\n excludes.\n- `include_metrics`: List of filters to override exclusion of any metrics.\n This option can be used to include metrics that are otherwise dropped by\n default. See [here](./internal/translation/default_metrics.go) for a list of metrics\n that are dropped by default. For example, the following configuration can be\n used to send through some of the metrics that are dropped by default.\n ```yaml\n include_metrics:\n # When sending in translated metrics.\n - metric_names: [cpu.interrupt, cpu.user, cpu.system]\n # When sending in metrics in OTel convention.\n - metric_name: system.cpu.time\n dimensions:\n state: [interrupt, user, system]\n ```\n- `log_data_points` (default = `false`): If the log level is set to `debug` \n and this is true, all datapoints dispatched to Splunk Observability Cloud will be logged.\n- `log_dimension_updates` (default = `false`): Whether or not to log dimension\n updates.\n- `disable_default_translation_rules` (default = `false`): Disable default translation\n of the OTel metrics to a SignalFx compatible format. The default translation rules are\n defined in `translation/constants.go`.\n- `timeout` (default = 5s): Amount of time to wait for a send operation to\n complete.\n- `headers` (no default): Headers to pass in the payload.\n- `max_idle_conns` (default = 100): The maximum idle HTTP connections the client can keep open.\n- `max_idle_conns_per_host` (default = 100): The maximum idle HTTP connections the client can keep open per host.\n- `idle_conn_timeout` (default = 30s): The maximum amount of time an idle connection will remain open before closing itself.\n- More HTTP settings are available, see \n [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md).\n- `sync_host_metadata`: Defines whether the exporter should scrape host metadata\n and send it as property updates to SignalFx backend. Disabled by default.\n IMPORTANT: Host metadata synchronization relies on `resourcedetection`\n processor. If this option is enabled make sure that `resourcedetection`\n processor is enabled in the pipeline with one of the cloud provider detectors\n or environment variable detector setting a unique value to `host.name` attribute\n within your k8s cluster. And keep `override=true` in resourcedetection config.\n- `exclude_properties`: A list of property filters to limit dimension update content.\n Property filters can contain any number of the following fields, supporting (negated)\n string literals, re2 `/regex/`, and [glob](https://github.com/gobwas/glob) syntax values:\n `dimension_name`, `dimension_value`, `property_name`, and `property_value`. 
For any field\n not expressly configured for each filter object, a default catch-all value of `/^.*$/` is used\n to allow each specified field to require a match for the filter to take effect:\n ```yaml\n # will filter all 'k8s.workload.name' properties from 'k8s.pod.uid' dimension updates:\n exclude_properties:\n - dimension_name: k8s.pod.uid\n property_name: k8s.workload.name\n ```\n- `dimension_client`: Contains options controlling the dimension update client configuration used for metadata updates.\n - `max_buffered` (default = 10,000): The buffer size for queued dimension updates.\n - `send_delay` (default = 10s): The time to wait between dimension updates for a given dimension.\n - `max_idle_conns` (default = 20): The maximum idle HTTP connections the client can keep open.\n - `max_idle_conns_per_host` (default = 20): The maximum idle HTTP connections the client can keep open per host.\n - `max_conns_per_host` (default = 20): The maximum total number of connections the client can keep open per host.\n - `idle_conn_timeout` (default = 30s): The maximum amount of time an idle connection will remain open before closing itself.\n- `nonalphanumeric_dimension_chars`: (default = `\"_-.\"`) A string of characters \nthat are allowed to be used as a dimension key in addition to alphanumeric \ncharacters. Each nonalphanumeric dimension key character that isn't in this string \nwill be replaced with a `_`.\n- `ingest_tls`: (no default) exposes a list of TLS settings to establish a secure connection with a signalfx receiver configured on another collector instance.\n - `ca_file` needs to be set if the exporter's `ingest_url` is pointing to a signalfx receiver\n with TLS enabled and using a self-signed certificate where its CA is not loaded in the system cert pool.\n The full list of TLS options can be found in the configtls [README](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/configtls#client-configuration).\n The following example instructs the signalfx exporter ingest client to use a custom `ca_file` to verify the server certificate.\n ```yaml\n ingest_tls:\n ca_file: \"/etc/opt/certs/ca.pem\"\n ```\n- `api_tls`: (no default) exposes a list of TLS settings to establish a secure connection with an http_forwarder extension configured on another collector instance.\n - `ca_file` needs to be set if the exporter's `api_url` is pointing to an http_forwarder extension\n with TLS enabled and using a self-signed certificate where its CA is not loaded in the system cert pool.\n The full list of TLS options can be found in the configtls [README](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/configtls#client-configuration).\n The following example instructs the signalfx exporter api client to use a custom `ca_file` to verify the server certificate.\n ```yaml\n api_tls:\n ca_file: \"/etc/opt/certs/ca.pem\"\n ```\n\nIn addition, this exporter offers queued retry which is enabled by default.\nInformation about queued retry configuration parameters can be found\n[here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md).\n\n## Traces Configuration (correlation only)\n\n:warning: _Note that traces must still be sent in using [sapmexporter](../sapmexporter) to see them in SignalFx._\n\nWhen traces are sent to the signalfx exporter it correlates traces to metrics. When a new service or environment is\nseen it associates the source (e.g. host or pod) to that service or environment in SignalFx. 
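For example, a minimal sketch of enabling correlation alongside a traces pipeline might look like the following (the values are illustrative, the `zipkin` receiver and `batch` processor stand in for whatever components your pipeline already uses, and the `correlation` options are described below):\n\n```yaml\nexporters:\n  signalfx:\n    access_token: \u003creplace_with_actual_access_token\u003e\n    realm: us0\n    correlation:\n      stale_service_timeout: 5m\n\nservice:\n  pipelines:\n    traces:\n      receivers: [zipkin]\n      processors: [batch]\n      exporters: [signalfx]\n```\n\n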
Metrics can then be\nfiltered based on that trace service and environment (`sf_service` and `sf_environment`).\n\nOne of `realm` and `api_url` is required.\n\n- `access_token` (required, no default): The access token is the authentication token\n provided by SignalFx.\n- `realm` (no default): SignalFx realm where the data will be received.\n- `api_url` (default = `https://api.{realm}.signalfx.com/`): Destination to which correlation updates\n are sent. If a value is explicitly set, the value of `realm` will not be used in determining `api_url`.\n The explicit value will be used instead.\n- `correlation`: Contains options controlling the syncing of service and environment properties onto dimensions.\n - `endpoint` (required, default = `api_url` or `https://api.{realm}.signalfx.com/`): This is the base URL for API requests (e.g. `https://api.us0.signalfx.com`).\n - `timeout` (default = 5s): The timeout for every attempt to send data to the backend.\n - `stale_service_timeout` (default = 5 minutes): How long to wait after a span's service name is last seen before uncorrelating it.\n - `max_requests` (default = 20): Max HTTP requests to be made in parallel.\n - `max_buffered` (default = 10,000): Max number of correlation updates that can be buffered before updates are dropped.\n - `max_retries` (default = 2): Max number of retries that will be made for failed correlation updates.\n - `log_updates` (default = false): Whether or not to log correlation updates to dimensions (at `DEBUG` level).\n - `retry_delay` (default = 30 seconds): How long to wait between retries.\n - `cleanup_interval` (default = 1 minute): How frequently to purge duplicate requests.\n - `sync_attributes` (default = `{\"k8s.pod.uid\": \"k8s.pod.uid\", \"container.id\": \"container.id\"}`) Map where each key is the span attribute to read and each value is the dimension to sync it to.\n\n## Default Metric Filters\n[List of metrics excluded by default](./internal/translation/default_metrics.go)\n\nSome OpenTelemetry receivers may send metrics that SignalFx considers to be categorized as custom metrics. In order to prevent unwanted overage usage due to custom metrics from these receivers, the SignalFx exporter has a [set of metrics excluded by default](./internal/translation/default_metrics.go). Some exclusion rules use regex to exclude multiple metric names. Some metrics are only excluded if specific resource labels (dimensions) are present. If `translation_rules` are configured and new metrics match a default exclusion, the new metric will still be excluded. Users may configure the SignalFx exporter's `include_metrics` config option to override any of the default exclusions, as `include_metrics` will always take precedence over any exclusions. 
An example of `include_metrics` is shown below.\n\n```\nexporters:\n signalfx:\n include_metrics:\n - metric_names: [cpu.interrupt, cpu.user, cpu.system]\n - metric_name: system.cpu.time\n dimensions:\n state: [interrupt, user, system]\n```\n\nThe following `include_metrics` example would instruct the exporter to send only `cpu.interrupt` metrics with a `cpu` dimension value (\"per core\" datapoints), and both \"per core\" and aggregate `cpu.idle` metrics:\n\n```\nexporters:\n signalfx:\n include_metrics:\n - metric_name: \"cpu.idle\"\n - metric_name: \"cpu.interrupt\"\n dimensions:\n cpu: [\"*\"]\n```\n\n## Translation Rules and Metric Transformations\n\nThe `translation_rules` metrics configuration field accepts a list of metric-transforming actions to\nhelp ensure compatibility with custom charts and dashboards when using the OpenTelemetry Collector. It also provides the ability to produce custom metrics by copying, calculating new, or aggregating other metric values without requiring an additional processor.\nThe rule language is expressed in yaml mappings and is [documented here](./internal/translation/translator.go). Translation rules currently allow the following actions:\n\n* `aggregate_metric` - Aggregates a metric through removal of specified dimensions\n* `calculate_new_metric` - Creates a new metric via operating on two constituent ones\n* `convert_values` - Converts float values to int or int to float for specified metric names\n* `copy_metrics` - Creates a new metric as a copy of another\n* `delta_metric` - Creates a new delta metric for a specified non-delta one\n* `divide_int` - Scales a metric's integer value by a given factor\n* `drop_dimensions` - Drops dimensions for specified metrics, or globally\n* `drop_metrics` - Drops all metrics with a given name\n* `multiply_float` - Scales a metric's float value by a given float factor\n* `multiply_int` - Scales a metric's int value by a given int factor\n* `rename_dimension_keys` - Renames dimensions for specified metrics, or globally\n* `rename_metrics` - Replaces a given metric name with a specified one\n* `split_metric` - Splits a given metric into multiple new ones for a specified dimension\n\nThe translation rules defined in [`translation/constants.go`](./internal/translation/constants.go) are used by default for this value. The default rules will create the following aggregated metrics from the [`hostmetrics` receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/hostmetricsreceiver/README.md):\n\n* cpu.idle\n* cpu.interrupt\n* cpu.nice\n* cpu.num_processors\n* cpu.softirq\n* cpu.steal\n* cpu.system\n* cpu.user\n* cpu.utilization\n* cpu.utilization_per_core\n* cpu.wait\n* disk.summary_utilization\n* disk.utilization\n* disk_ops.pending\n* disk_ops.total\n* memory.total\n* memory.utilization\n* network.total\n* process.cpu_time_seconds\n* system.disk.io.total\n* system.disk.operations.total\n* system.network.io.total\n* system.network.packets.total\n* vmpage_io.memory.in\n* vmpage_io.memory.out\n* vmpage_io.swap.in\n* vmpage_io.swap.out\n\nIn addition to the aggregated metrics, the default translation rules make available the following \"per core\" custom hostmetrics.\nThe CPU number is assigned to the dimension `cpu`.\n\n* cpu.interrupt\n* cpu.nice\n* cpu.softirq\n* cpu.steal\n* cpu.system\n* cpu.user\n* cpu.wait\n\nThese metrics are intended to be reported directly to Splunk IM by the SignalFx exporter. 
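For reference, user-supplied rules follow the same yaml shape as the default ones. A rough sketch with purely illustrative metric names (the full rule language is documented in the translator source linked above):\n\n```yaml\nexporters:\n  signalfx:\n    translation_rules:\n      # drop a metric by name (illustrative name)\n      - action: drop_metrics\n        metric_names:\n          some.unwanted.metric: true\n      # rename a metric (illustrative names)\n      - action: rename_metrics\n        mapping:\n          some.old.metric: some.new.metric\n```\n\n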
Any desired changes to their attributes or values should be made via additional translation rules or from their constituent host metrics.\n\n## Example Config\n\n```yaml\nexporters:\n signalfx:\n access_token: \u003creplace_with_actual_access_token\u003e\n access_token_passthrough: true\n headers:\n added-entry: \"added value\"\n dot.test: test\n realm: us1\n timeout: 5s\n max_idle_conns: 80\n```\n\n\u003e :warning: When enabling the SignalFx receiver or exporter, configure both the `metrics` and `logs` pipelines.\n\n```yaml\nservice:\n pipelines:\n metrics:\n receivers: [signalfx]\n processors: [memory_limiter, batch]\n exporters: [signalfx]\n logs:\n receivers: [signalfx]\n processors: [memory_limiter, batch]\n exporters: [signalfx]\n traces:\n receivers: [zipkin]\n processors: []\n exporters: [signalfx]\n```\n\nThe full list of settings exposed for this exporter is documented [here](config.go)\nwith detailed sample configurations [here](testdata/config.yaml).\n\nThis exporter also offers proxy support as documented\n[here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter#proxy-support).\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [HTTP settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"access_token":{"description":"AccessToken is the authentication token provided by SignalFx.","title":"access_token","type":"string"},"access_token_passthrough":{"description":"AccessTokenPassthrough indicates whether to associate datapoints with an organization access token received in request.","title":"access_token_passthrough","type":"boolean"},"api_tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"api_tls needs to be set if the exporter's APIURL is pointing to an http_forwarder extension\nwith TLS enabled and using a self-signed certificate where its CA is not loaded in the system cert pool.","title":"api_tls"},"api_url":{"description":"APIURL is the destination to which SignalFx metadata will be sent. This\nvalue takes precedence over the value of Realm","title":"api_url","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"correlation":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.correlation.Config","description":"Correlation configuration for syncing traces service and environment to metrics.","title":"correlation"},"delta_translation_ttl":{"description":"DeltaTranslationTTL specifies in seconds the max duration to keep the most recent datapoint for any\n`delta_metric` specified in TranslationRules. 
Default is 3600s.","title":"delta_translation_ttl","type":"integer"},"dimension_client":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.DimensionClientConfig","description":"Dimension update client configuration used for metadata updates.","title":"dimension_client"},"disable_default_translation_rules":{"title":"disable_default_translation_rules","type":"boolean"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"exclude_metrics":{"description":"ExcludeMetrics defines dpfilter.MetricFilters that will determine metrics to be\nexcluded from sending to SignalFx backend. If translations are enabled with\nTranslationRules options, the exclusion will be applied to translated metrics.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.MetricFilter"},"title":"exclude_metrics","type":"array"},"exclude_properties":{"description":"ExcludeProperties defines dpfilter.PropertyFilters to prevent inclusion of\nproperties to include with dimension updates to the SignalFx backend.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.PropertyFilter"},"title":"exclude_properties","type":"array"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"include_metrics":{"description":"IncludeMetrics defines dpfilter.MetricFilters to override exclusion of any metric.\nThis option can be used to include metrics that are otherwise dropped by default.\nSee ./translation/default_metrics.go for a list of metrics that are dropped by default.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.MetricFilter"},"title":"include_metrics","type":"array"},"ingest_tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"ingest_tls needs to be set if the exporter's IngestURL is pointing to a signalfx receiver\nwith TLS enabled and using a self-signed certificate where its CA is not loaded in the system cert pool.","title":"ingest_tls"},"ingest_url":{"description":"IngestURL is the destination where SignalFx metrics will be sent; it is\nintended for tests and debugging. The value of Realm is ignored if the\nURL is specified. 
The exporter will automatically append the appropriate\npath: \"/v2/datapoint\" for metrics, and \"/v2/event\" for events.","title":"ingest_url","type":"string"},"log_data_points":{"description":"Whether to log datapoints dispatched to Splunk Observability Cloud","title":"log_data_points","type":"boolean"},"log_dimension_updates":{"description":"Whether to log dimension updates being sent to SignalFx.","title":"log_dimension_updates","type":"boolean"},"max_connections":{"description":"MaxConnections is used to set a limit to the maximum idle HTTP connection the exporter can keep open.\nDeprecated: use HTTPClientSettings.MaxIdleConns or HTTPClientSettings.MaxIdleConnsPerHost instead.","title":"max_connections","type":"integer"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"nonalphanumeric_dimension_chars":{"description":"NonAlphanumericDimensionChars is a list of allowable characters, in addition to alphanumeric ones,\nto be used in a dimension key.","title":"nonalphanumeric_dimension_chars","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"realm":{"description":"Realm is the SignalFx realm where data is going to be sent to.","title":"realm","type":"string"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"sync_host_metadata":{"description":"SyncHostMetadata defines if the exporter should scrape host metadata and\nsends it as property updates to SignalFx backend.\nIMPORTANT: Host metadata synchronization relies on `resourcedetection` processor.\n If this option is enabled make sure that `resourcedetection` processor\n is enabled in the pipeline with one of the cloud provider detectors\n or environment variable detector setting a unique value to\n `host.name` attribute within your k8s cluster. 
Also keep `override=true` in resourcedetection config.","title":"sync_host_metadata","type":"boolean"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"translation_rules":{"description":"TranslationRules defines a set of rules for how to translate metrics to a SignalFx compatible format.\nRules defined in translation/constants.go are used by default.\nDeprecated: Use metricstransform processor to do metrics transformations.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.Rule"},"title":"translation_rules","type":"array"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.DimensionClientConfig":{"additionalProperties":false,"properties":{"idle_conn_timeout":{"title":"idle_conn_timeout","type":"string"},"max_buffered":{"title":"max_buffered","type":"integer"},"max_conns_per_host":{"title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"title":"max_idle_conns_per_host","type":"integer"},"send_delay":{"title":"send_delay","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.correlation.Config":{"additionalProperties":false,"description":"Config defines configuration for correlation via traces.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"cleanup_interval":{"title":"cleanup_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"log_updates":{"title":"log_updates","type":"boolean"},"max_buffered":{"title":"max_buffered","type":"integer"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP 
connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"max_requests":{"title":"max_requests","type":"integer"},"max_retries":{"title":"max_retries","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_delay":{"title":"retry_delay","type":"string"},"stale_service_timeout":{"description":"How long to wait after a trace span's service name is last seen before\nuncorrelating that service.","title":"stale_service_timeout","type":"string"},"sync_attributes":{"description":"SyncAttributes is a key of the span attribute name to sync to the dimension as the value.","patternProperties":{".*":{"type":"string"}},"title":"sync_attributes","type":"object"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.Rule":{"additionalProperties":false,"properties":{"action":{"description":"Action specifies the translation action to be applied on metrics.\nThis is a required field.","title":"action","type":"string"},"add_dimensions":{"description":"AddDimensions used by \"rename_metrics\" translation rule to add dimensions that are necessary for\nexisting SFx content for desired metric name","patternProperties":{".*":{"type":"string"}},"title":"add_dimensions","type":"object"},"aggregation_method":{"description":"AggregationMethod specifies method used by \"aggregate_metric\" translation rule","title":"aggregation_method","type":"string"},"copy_dimensions":{"description":"CopyDimensions used by \"rename_metrics\" translation rule to copy dimensions that are necessary for\nexisting SFx content for desired metric name. This will duplicate the dimension value and isn't a rename.","patternProperties":{".*":{"type":"string"}},"title":"copy_dimensions","type":"object"},"dimension_key":{"description":"DimensionKey is used by \"split_metric\" translation rule action to specify dimension key\nthat will be used to translate the metric datapoints. 
Datapoints that don't have\nthe specified dimension key will not be translated.\nDimensionKey is also used by \"copy_metrics\" for filtering.","title":"dimension_key","type":"string"},"dimension_pairs":{"description":"DimensionPairs used by \"drop_dimensions\" translation rule to specify dimension pairs that\nshould be dropped.","patternProperties":{".*":{"patternProperties":{".*":{"type":"boolean"}},"type":"object"}},"title":"dimension_pairs","type":"object"},"dimension_values":{"description":"DimensionValues is used by \"copy_metrics\" to filter out datapoints with dimension values\nnot matching values set in this field","patternProperties":{".*":{"type":"boolean"}},"title":"dimension_values","type":"object"},"mapping":{"description":"Mapping specifies key/value mapping that is used by rename_dimension_keys,\nrename_metrics, copy_metrics, and split_metric actions.","patternProperties":{".*":{"type":"string"}},"title":"mapping","type":"object"},"metric_name":{"description":"MetricName is used by \"split_metric\" translation rule to specify a name\nof a metric that will be split.","title":"metric_name","type":"string"},"metric_names":{"description":"MetricNames is used by \"rename_dimension_keys\" and \"drop_metrics\" translation rules.","patternProperties":{".*":{"type":"boolean"}},"title":"metric_names","type":"object"},"operand1_metric":{"title":"operand1_metric","type":"string"},"operand2_metric":{"title":"operand2_metric","type":"string"},"operator":{"title":"operator","type":"string"},"scale_factors_float":{"description":"ScaleFactorsFloat is used by multiply_float action to scale\nfloat metric values, key/value format: metric_name/scale_factor","patternProperties":{".*":{"type":"number"}},"title":"scale_factors_float","type":"object"},"scale_factors_int":{"description":"ScaleFactorsInt is used by multiply_int and divide_int action to scale\ninteger metric values, key/value format: metric_name/scale_factor","patternProperties":{".*":{"type":"integer"}},"title":"scale_factors_int","type":"object"},"types_mapping":{"description":"TypesMapping represents metric_name/metric_type key/value pairs,\nused by ActionConvertValues.","patternProperties":{".*":{"type":"string"}},"title":"types_mapping","type":"object"},"without_dimensions":{"description":"WithoutDimensions used by \"aggregate_metric\" translation rule to specify dimensions to be\nexcluded by aggregation.","items":{"type":"string"},"title":"without_dimensions","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.MetricFilter":{"additionalProperties":false,"properties":{"dimensions":{"description":"A map of dimension key/values to match against. All key/values must\nmatch a datapoint for it to be matched. 
The map values can be either\na single string or a list of strings.","patternProperties":{".*":true},"title":"dimensions","type":"object"},"metric_name":{"description":"A single metric name to match against.","title":"metric_name","type":"string"},"metric_names":{"description":"A list of metric names to match against.","items":{"type":"string"},"title":"metric_names","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.PropertyFilter":{"additionalProperties":false,"description":"PropertyFilter is a collection of *StringFilter items used in determining if a given property (name and value) should be included with a dimension update request.","properties":{"dimension_name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.StringFilter","description":"DimensionName is the (inverted) literal, regex, or globbed dimension name/key to not target for dimension updates.\nIf there are no sub-property filters for its enclosing entry, it will disable dimension updates\nfor this dimension name in total.","title":"dimension_name"},"dimension_value":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.StringFilter","description":"PropertyValue is the (inverted) literal, regex, or globbed dimension value to not target with a dimension update\nIf there are no sub-property filters for its enclosing entry, it will disable dimension updates\nfor this dimension value in total.","title":"dimension_value"},"property_name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.StringFilter","description":"PropertyName is the (inverted) literal, regex, or globbed property name/key to not include in dimension updates","title":"property_name"},"property_value":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.StringFilter","description":"PropertyValue is the (inverted) literal or globbed property value to not include in dimension updates","title":"property_value"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.internal.translation.dpfilters.StringFilter":{"additionalProperties":false,"description":"StringFilter will match if any one of the given strings is a match.","properties":{},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.skywalkingexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for SkyWalking exporter.","markdownDescription":"# SkyWalking gRPC Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics, logs |\n| Distributions | [contrib] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nExports data via gRPC using [skywalking-data-collect-protocol](https://github.com/apache/skywalking-data-collect-protocol) format. 
By default, this exporter requires TLS and offers queued retry capabilities.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): host:port to which the exporter is going to send SkyWalking log data,\nusing the gRPC protocol. The valid syntax is described\n[here](https://github.com/grpc/grpc/blob/master/doc/naming.md).\nIf a scheme of `https` is used then client transport security is enabled and overrides the `insecure` setting.\n\n- `num_streams` (default = `2`): the number of grpc streams that send the gRPC requests.\n\nBy default, TLS is enabled and must be configured under `tls:`: \n\n- `insecure` (default = `false`): whether to enable client transport security for\n the exporter's connection.\n\nAs a result, the following parameters are also required under `tls:`:\n\n- `cert_file` (no default): path to the TLS cert to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n- `key_file` (no default): path to the TLS key to use for TLS required connections. Should\n only be used if `insecure` is set to false.\n\nExample:\n\n```yaml\nexporters:\n skywalking:\n endpoint: \"192.168.1.5:11800\"\n tls:\n insecure: true \n num_streams: 5 \n skywalking/2:\n endpoint: \"10.18.7.4:11800\"\n compression: \"gzip\"\n tls:\n cert_file: file.cert\n key_file: file.key\n timeout: 10s\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"num_streams":{"description":"The number of grpc streams that send the gRPC requests.","title":"num_streams","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. 
See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC client. See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Splunk exporter.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"disable_compression":{"description":"Disable GZip compression. 
Defaults to false.","title":"disable_compression","type":"boolean"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"export_raw":{"description":"ExportRaw to send only the log's body, targeting a Splunk HEC raw endpoint.","title":"export_raw","type":"boolean"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"health_check_enabled":{"description":"HecHealthCheckEnabled can be used to verify Splunk HEC health on exporter's startup","title":"health_check_enabled","type":"boolean"},"health_path":{"description":"HealthPath for health API, default is '/services/collector/health'","title":"health_path","type":"string"},"heartbeat":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.HecHeartbeat","description":"Heartbeat is the configuration to enable heartbeat","title":"heartbeat"},"hec_metadata_to_otel_attrs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.splunk.HecToOtelAttrs","description":"HecToOtelAttrs creates a mapping from attributes to HEC specific metadata: source, sourcetype, index and host.","title":"hec_metadata_to_otel_attrs"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"index":{"description":"Splunk index, optional name of the Splunk index.","title":"index","type":"string"},"log_data_enabled":{"description":"LogDataEnabled can be used to disable sending logs by the exporter.","title":"log_data_enabled","type":"boolean"},"max_connections":{"description":"MaxConnections is used to set a limit to the maximum idle HTTP connection the exporter can keep open. Defaults to 100.\nDeprecated: use HTTPClientSettings.MaxIdleConns or HTTPClientSettings.MaxIdleConnsPerHost instead.","title":"max_connections","type":"integer"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_content_length_logs":{"description":"Maximum log payload size in bytes. Default value is 2097152 bytes (2MiB).\nMaximum allowed value is 838860800 (~ 800 MB).","title":"max_content_length_logs","type":"integer"},"max_content_length_metrics":{"description":"Maximum metric payload size in bytes. Default value is 2097152 bytes (2MiB).\nMaximum allowed value is 838860800 (~ 800 MB).","title":"max_content_length_metrics","type":"integer"},"max_content_length_traces":{"description":"Maximum trace payload size in bytes. Default value is 2097152 bytes (2MiB).\nMaximum allowed value is 838860800 (~ 800 MB).","title":"max_content_length_traces","type":"integer"},"max_event_size":{"description":"Maximum payload size, raw uncompressed. 
Default value is 5242880 bytes (5MiB).\nMaximum allowed value is 838860800 (~ 800 MB).","title":"max_event_size","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"otel_to_hec_fields":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.OtelToHecFields","description":"HecFields creates a mapping from attributes to HEC fields.","title":"otel_to_hec_fields"},"profiling_data_enabled":{"description":"ProfilingDataEnabled can be used to disable sending profiling data by the exporter.","title":"profiling_data_enabled","type":"boolean"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"source":{"description":"Optional Splunk source: https://docs.splunk.com/Splexicon:Source.\nSources identify the incoming data.","title":"source","type":"string"},"sourcetype":{"description":"Optional Splunk source type: https://docs.splunk.com/Splexicon:Sourcetype.","title":"sourcetype","type":"string"},"splunk_app_name":{"description":"App name is used to track telemetry information for Splunk App's using HEC by App name. Defaults to \"OpenTelemetry Collector Contrib\".","title":"splunk_app_name","type":"string"},"splunk_app_version":{"description":"App version is used to track telemetry information for Splunk App's using HEC by App version. Defaults to the current OpenTelemetry Collector Contrib build version.","title":"splunk_app_version","type":"string"},"telemetry":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.HecTelemetry","description":"Telemetry is the configuration for splunk hec exporter telemetry","title":"telemetry"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"token":{"description":"HEC Token is the authentication token provided by Splunk: https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector.","title":"token","type":"string"},"use_multi_metric_format":{"description":"UseMultiMetricFormat combines metric events to save space during ingestion.","title":"use_multi_metric_format","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.HecHeartbeat":{"additionalProperties":false,"description":"HecHeartbeat defines the heartbeat information for the exporter","properties":{"interval":{"description":"Interval represents the time interval for the heartbeat interval. If nothing or 0 is set,\nheartbeat is not enabled.\nA heartbeat is an event sent to _internal index with metadata for the current collector/host.","title":"interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.HecTelemetry":{"additionalProperties":false,"description":"HecTelemetry defines the telemetry configuration for the exporter","properties":{"enabled":{"description":"Enabled is the bool to enable telemetry inside splunk hec exporter","title":"enabled","type":"boolean"},"extra_attributes":{"description":"ExtraAttributes is the extra attributes for metrics inside splunk hec exporter","patternProperties":{".*":{"type":"string"}},"title":"extra_attributes","type":"object"},"override_metrics_names":{"description":"OverrideMetricsNames is the map to override metrics for internal metrics in splunk hec exporter","patternProperties":{".*":{"type":"string"}},"title":"override_metrics_names","type":"object"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.OtelToHecFields":{"additionalProperties":false,"description":"OtelToHecFields defines the mapping of attributes to HEC fields","properties":{"severity_number":{"description":"SeverityNumber informs the exporter to map the severity number field to a specific HEC field.","title":"severity_number","type":"string"},"severity_text":{"description":"SeverityText informs the exporter to map the severity text field to a specific HEC field.","title":"severity_text","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.sumologicexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for Sumo Logic exporter.","markdownDescription":"# Sumo Logic Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics, logs |\n| Distributions | [contrib] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\n## Migration to new architecture\n\n**This exporter is undergoing major changes right now.**\n\nFor some time we have been developing the [new Sumo Logic exporter](https://github.com/SumoLogic/sumologic-otel-collector/tree/main/pkg/exporter/sumologicexporter#sumo-logic-exporter) and now we are in the process of moving it into this repository.\n\nThe following options are deprecated and they will not exist in the new version:\n\n- `metric_format: {carbon2, graphite}`\n- `metadata_attributes: [\u003cregex\u003e]`\n- `graphite_template: \u003ctemplate\u003e`\n- `source_category: \u003ctemplate\u003e`\n- `source_name: \u003ctemplate\u003e`\n- `source_host: \u003ctemplate\u003e`\n\nAfter the new exporter is moved to this repository:\n\n- `carbon2` and `graphite` will no longer be supported and the `prometheus` or `otlp` format should be used\n- all resource level attributes are 
going to be treated as `metadata_attributes`. You can use [Group by Attributes processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/groupbyattrsprocessor) to move attributes from record level to resource level. For example:\n\n ```yaml\n # before switch to new collector\n exporters:\n sumologic:\n metadata_attributes:\n - my_attribute\n # after switch to new collector\n processors:\n groupbyattrs:\n keys:\n - my_attribute\n ```\n\n- Source templates (`source_category`, `source_name` and `source_host`) are going to be removed from the exporter and sources may be set using `_sourceCategory`, `_sourceName` or `_sourceHost` resource attributes. We recommend using the [Transform Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor/). For example:\n\n ```yaml\n # before switch to new collector\n exporters:\n sumologic:\n source_category: \"%{foo}/constant/%{bar}\"\n # after switch to new collector\n processors:\n transformprocessor:\n log_statements:\n context: log\n statements:\n # set default value to unknown\n - set(attributes[\"foo\"], \"unknown\") where attributes[\"foo\"] == nil\n - set(attributes[\"bar\"], \"unknown\") where attributes[\"bar\"] == nil\n # set _sourceCategory as \"%{foo}/constant/%{bar}\"\n - set(resource.attributes[\"_sourceCategory\"], Concat([attributes[\"foo\"], \"/constant/\", attributes[\"bar\"]], \"\"))\n ```\n\n## Configuration\n\nThis exporter supports sending logs and metrics data to [Sumo Logic](https://www.sumologic.com/).\nTraces are exported using the native otlphttp exporter as described\n[here](https://help.sumologic.com/Traces/Getting_Started_with_Transaction_Tracing)\n\nConfiguration is specified via the yaml in the following structure:\n\n```yaml\nexporters:\n # ...\n sumologic:\n # unique URL generated for your HTTP Source, this is the address to send data to\n endpoint: \u003cHTTP_Source_URL\u003e\n # Compression encoding format, empty string means no compression, default = gzip\n compress_encoding: {gzip, deflate, \"\"}\n # max HTTP request body size in bytes before compression (if applied),\n # default = 1_048_576 (1MB)\n max_request_body_size: \u003cmax_request_body_size\u003e\n\n # List of regexes for attributes which should be sent as metadata\n # default = []\n #\n # This option is deprecated:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture\n metadata_attributes: [\u003cregex\u003e]\n\n # format to use when sending logs to Sumo Logic, default = json,\n log_format: {json, text}\n\n # format to use when sending metrics to Sumo Logic, default = prometheus,\n #\n # carbon2 and graphite are deprecated:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture\n metric_format: {carbon2, graphite, prometheus}\n\n # Template for Graphite format.\n # this option affects graphite format only\n # By default this is \"%{_metric_}\".\n #\n # Please refer to Source templates for a formatting explanation:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#source-templates\n #\n # This option is deprecated:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture\n graphite_template: \u003ctemplate\u003e\n\n # Desired source category. 
Useful if you want to override the source category configured for the source.\n #\n # Please refer to Source templates for a formatting explanation:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#source-templates\n #\n # This option is deprecated:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture\n source_category: \u003ctemplate\u003e\n\n # Desired source name. Useful if you want to override the source name configured for the source.\n #\n # Please refer to Source templates for a formatting explanation:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#source-templates\n #\n # This option is deprecated:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture\n source_name: \u003ctemplate\u003e\n\n # Desired source host. Useful if you want to override the source host configured for the source.\n #\n # Please refer to Source templates for a formatting explanation:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#source-templates\n #\n # This option is deprecated:\n # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/sumologicexporter#migration-to-new-architecture\n source_host: \u003ctemplate\u003e\n\n # timeout is the timeout for every attempt to send data to the backend,\n # maximum connection timeout is 55s, default = 5s\n timeout: \u003ctimeout\u003e\n\n # for below described queueing and retry related configuration please refer to:\n # https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration\n\n retry_on_failure:\n # default = true\n enabled: {true, false}\n # time to wait after the first failure before retrying;\n # ignored if enabled is false, default = 5s\n initial_interval: \u003cinitial_interval\u003e\n # is the upper bound on backoff; ignored if enabled is false, default = 30s\n max_interval: \u003cmax_interval\u003e\n # is the maximum amount of time spent trying to send a batch;\n # ignored if enabled is false, default = 120s\n max_elapsed_time: \u003cmax_elapsed_time\u003e\n\n sending_queue:\n # default = false\n enabled: {true, false}\n # number of consumers that dequeue batches; ignored if enabled is false,\n # default = 10\n num_consumers: \u003cnum_consumers\u003e\n # when set, enables persistence and uses the component specified as a storage extension for the persistent queue\n # make sure to configure and add a `file_storage` extension in `service.extensions`.\n # default = None\n storage: \u003cstorage_name\u003e\n # maximum number of batches kept in memory before dropping data;\n # ignored if enabled is false, default = 1000\n #\n # user should calculate this as num_seconds * requests_per_second where:\n # num_seconds is the number of seconds to buffer in case of a backend outage,\n # requests_per_second is the average number of requests per second.\n queue_size: \u003cqueue_size\u003e\n```\n\n## Source Templates\n\nYou can specify a template with an attribute for `source_category`, `source_name`, `source_host` or `graphite_template` using `%{attr_name}`.\n\nFor example, when there is an attribute `my_attr`: `my_value`, `metrics/%{my_attr}` would be expanded to `metrics/my_value`.\n\nFor `graphite_template`, in addition to above, `%{_metric_}` is 
going to be replaced with metric name.\n\n## Example Configuration\n\n```yaml\nexporters:\n sumologic:\n endpoint: http://localhost:3000\n compress_encoding: \"gzip\"\n max_request_body_size: \"1_048_576\" # 1MB\n log_format: \"text\"\n metric_format: \"prometheus\"\n source_category: \"custom category\"\n source_name: \"custom name\"\n source_host: \"custom host\"\n metadata_attributes:\n - k8s.*\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"client":{"description":"Name of the client","title":"client","type":"string"},"compress_encoding":{"description":"Compression encoding format, either empty string, gzip or deflate (default gzip)\nEmpty string means no compression","title":"compress_encoding","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"graphite_template":{"description":"Graphite template.\nPlaceholders `%{attr_name}` will be replaced with attribute value for attr_name.","title":"graphite_template","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"log_format":{"description":"Logs related configuration\nFormat to post logs into Sumo. 
(default json)\n * text - Logs will appear in Sumo Logic in text format.\n * json - Logs will appear in Sumo Logic in json format.","title":"log_format","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"max_request_body_size":{"description":"Max HTTP request body size in bytes before compression (if applied).\nBy default 1MB is recommended.","title":"max_request_body_size","type":"integer"},"metadata_attributes":{"description":"List of regexes for attributes which should be send as metadata","items":{"type":"string"},"title":"metadata_attributes","type":"array"},"metric_format":{"description":"Metrics related configuration\nThe format of metrics you will be sending, either graphite or carbon2 or prometheus (Default is prometheus)\nPossible values are `carbon2` and `prometheus`","title":"metric_format","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"source_category":{"description":"Sumo specific options\nDesired source category.\nUseful if you want to override the source category configured for the source.\nPlaceholders `%{attr_name}` will be replaced with attribute value for attr_name.","title":"source_category","type":"string"},"source_host":{"description":"Desired host name.\nUseful if you want to override the source host configured for the source.\nPlaceholders `%{attr_name}` will be replaced with attribute value for attr_name.","title":"source_host","type":"string"},"source_name":{"description":"Desired source name.\nUseful if you want to override the source name configured for the source.\nPlaceholders `%{attr_name}` will be replaced with attribute value for attr_name.","title":"source_name","type":"string"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tanzuobservabilityexporter.Config":{"additionalProperties":false,"description":"Config defines configuration options for the exporter.","properties":{"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tanzuobservabilityexporter.MetricsConfig","title":"metrics"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"traces":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tanzuobservabilityexporter.TracesConfig","description":"Traces defines the Traces exporter specific configuration","title":"traces"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tanzuobservabilityexporter.MetricsConfig":{"additionalProperties":false,"properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"app_tags_excluded":{"description":"AppTagsExcluded will exclude the Resource Attributes `application`, `service.name` -\u003e (service),\n`cluster`, and `shard` from the transformed TObs metric if set to true.","title":"app_tags_excluded","type":"boolean"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attrs_included":{"title":"resource_attrs_included","type":"boolean"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tanzuobservabilityexporter.TracesConfig":{"additionalProperties":false,"markdownDescription":"# Tanzu Observability (Wavefront) Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics |\n| Distributions | [contrib] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending metrics and traces to [Tanzu Observability](https://tanzu.vmware.com/observability).\n\n## Prerequisites\n\n- [Obtain the Tanzu Observability by Wavefront API token.](https://docs.wavefront.com/wavefront_api.html#generating-an-api-token)\n- [Set up and start a Tanzu Observability by Wavefront proxy](https://docs.wavefront.com/proxies_installing.html) and\n configure it with the API token you obtained.\n- To have the proxy generate [span RED metrics](https://docs.wavefront.com/trace_data_details.html#red-metrics) from\n trace data, [configure](https://docs.wavefront.com/proxies_configuring.html) the proxy to receive traces by\n setting `customTracingListenerPorts=30001`. For metrics, the proxy listens on port 2878 by default.\n\n## Configuration\n\nGiven a Wavefront proxy at 10.10.10.10 configured with `customTracingListenerPorts=30001`, a basic configuration of\nthe Tanzu Observability exporter follows:\n\n```yaml\nreceivers:\n examplereceiver:\n\nprocessors:\n batch:\n timeout: 10s\n\nexporters:\n tanzuobservability:\n traces:\n endpoint: \"http://10.10.10.10:30001\"\n metrics:\n endpoint: \"http://10.10.10.10:2878\"\n\nservice:\n pipelines:\n traces:\n receivers: [ examplereceiver ]\n processors: [ batch ]\n exporters: [ tanzuobservability ]\n metrics:\n receivers: [ examplereceiver ]\n processors: [ batch ]\n exporters: [ tanzuobservability ]\n```\n\n## Advanced Configuration\n\n### Resource Attributes on Metrics\n\nClient programs using an OpenTelemetry SDK can be configured to wrap all emitted telemetry (metrics, spans, logs) with\na set of global key-value pairs,\ncalled [resource attributes](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md)\n.\nBy default, the Tanzu Observability Exporter includes resource attributes on spans but _excludes_ them on metrics. To\ninclude resource attributes as tags on metrics, set the flag `resource_attrs_included` to `true` as per the example\nbelow.\n\n**Note:** Tanzu Observability has a 254-character limit on tag key-value pairs. 
If a resource attribute exceeds this\nlimit, the metric will not show up in Tanzu Observability.\n\n### Application Resource Attributes on Metrics\n\nThe Tanzu Observability Exporter will\ninclude [application resource attributes](https://docs.wavefront.com/trace_data_details.html#application-tags) on\nmetrics (`application`, `service.name`\n, `cluster`, and `shard`). To exclude these resource\nattributes as tags on metrics, set the flag `app_tags_excluded` to `true` as per the example\nbelow.\n\n**Note:** A tag `service.name`(if provided) becomes `service` on the transformed wavefront metric. However, if both the\ntags (`service` \u0026 `service.name`) are provided then the `service` tag will be included.\n\n### Queuing and Retries\n\nThis exporter uses OpenTelemetry Collector helpers to queue data and retry on failures.\n\n* `retry_on_failure` [Details and defaults here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration)\n .\n* `sending_queue` [Details and defaults here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration)\n\n### Recommended Pipeline Processors\n\nThe memory_limiter processor is recommended to prevent out of memory situations on the collector. It allows performing\nperiodic checks of memory usage – if it exceeds defined limits it will begin dropping data and forcing garbage\ncollection to reduce memory\nconsumption. [Details and defaults here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiterprocessor/README.md)\n.\n\n**Note:** The order matters when enabling multiple processors in a pipeline (e.g. the memory limiter and batch\nprocessors in the example config below). Please refer to the\nprocessors' [documentation](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor)\nfor more information.\n\n### Example Advanced Configuration\n\n```yaml\nreceivers:\n examplereceiver:\n\nprocessors:\n memory_limiter:\n check_interval: 1s\n limit_percentage: 50\n spike_limit_percentage: 30\n batch:\n timeout: 10s\n\nexporters:\n tanzuobservability:\n traces:\n endpoint: \"http://10.10.10.10:30001\"\n metrics:\n endpoint: \"http://10.10.10.10:2878\"\n resource_attrs_included: true\n app_tags_excluded: true\n retry_on_failure:\n max_elapsed_time: 3m\n sending_queue:\n queue_size: 10000\n\nservice:\n pipelines:\n traces:\n receivers: [ examplereceiver ]\n processors: [ memory_limiter, batch ]\n exporters: [ tanzuobservability ]\n metrics:\n receivers: [ examplereceiver ]\n processors: [ memory_limiter, batch ]\n exporters: [ tanzuobservability ]\n```\n\n## Attributes Required by Tanzu Observability\n\n### Source\n\nA `source` field is required in Tanzu\nObservability [spans](https://docs.wavefront.com/trace_data_details.html#span-fields)\nand [metrics](https://docs.wavefront.com/wavefront_data_format.html#wavefront-data-format-fields). The source is set to\nthe\nfirst matching OpenTelemetry Resource Attribute:\n\n1. `source`\n2. `host.name`\n3. `hostname`\n4. 
`host.id`\n\nTo reduce duplicate data, the matched attribute is excluded from the tags on the exported Tanzu Observability span or\nmetric.\nIf none of the above resource attributes exist, the OpenTelemetry Collector's hostname is used as a fallback for source.\n\n### Application Identity Tags on Spans\n\n[Application identity tags](https://docs.wavefront.com/trace_data_details.html#how-wavefront-uses-application-tags) of\n`application` and `service` are required for all spans in Tanzu Observability.\n\n- `application` is set to the value of the attribute `application` on the OpenTelemetry Span or Resource. Default is \"\n defaultApp\".\n- `service` is set to the value of the attribute `service` or `service.name` on the OpenTelemetry Span or Resource. Default\n is \"defaultService\".\n\n## Data Conversion for Traces\n\n- Trace IDs and Span IDs are converted to UUIDs. For example, span IDs are left-padded with zeros to fit the correct\n size.\n- Events are converted to [Span Logs](https://docs.wavefront.com/trace_data_details.html#span-logs).\n- Kind is converted to the `span.kind` tag.\n- If a Span's status code is error, a tag of `error=true` is added. If the status also has a description, it's set\n to `otel.status_description`.\n- TraceState is converted to the `w3c.tracestate` tag.\n\n## Data Conversion for Metrics\n\nThis section describes the process used by the Exporter when converting from\n[OpenTelemetry Metrics](https://opentelemetry.io/docs/reference/specification/metrics/datamodel) to\n[Tanzu Observability by Wavefront Metrics](https://docs.wavefront.com/metric_types.html).\n\n| OpenTelemetry Metric Type | Wavefront Metric Type | Notes |\n| ------ | ------ | ------ |\n| Gauge | Gauge |\n| Cumulative Sum | Cumulative Counter |\n| Delta Sum | Delta Counter |\n| Cumulative Histogram (incl. Exponential) | Cumulative Counters | [Details below](#cumulative-histogram-conversion-incl-exponential). |\n| Delta Histogram (incl. Exponential) | Histogram |\n| Summary | Gauges | [Details below](#summary-conversion).\n\n### Cumulative Histogram Conversion (incl. Exponential)\n\nA cumulative histogram is converted to multiple counter metrics: one counter per bucket in the histogram. Each counter\nhas a special \"le\" tag that matches the upper bound of the corresponding bucket. 
The value of the counter metric is the\nsum of the histogram's corresponding bucket and all the buckets before it.\n\nWhen working with OpenTelemetry Cumulative Histograms that have been converted to Wavefront Counters, these functions\nwill be of use:\n\n- [cumulativeHisto()](https://docs.wavefront.com/ts_cumulativeHisto.html)\n- [cumulativePercentile()](https://docs.wavefront.com/ts_cumulativePercentile.html)\n\n#### Example\n\nSuppose a cumulative histogram named \"http.response_times\" has\nthe following buckets and values:\n\n| Bucket | Value |\n| ------ | ----- |\n| \u0026le; 100ms | 5 |\n| \u0026gt; 100ms to \u0026le; 200ms | 20 |\n| \u0026gt; 200ms | 100 |\n\nThe exporter sends the following metrics to tanzuobservability:\n\n| Name | Tags | Value |\n| ---- | ---- | ----- |\n| http.response_times | le=\"100\" | 5 |\n| http.response_times | le=\"200\" | 25 |\n| http.response_times | le=\"+Inf\" | 125 |\n\n#### Example WQL Query on a Cumulative Histogram\n\nUsing the cumulative histogram from the section above, this WQL query will produce a graph showing\nthe 95th percentile of http response times in the last 15 minutes.\n\n```\ncumulativePercentile(95, mavg(15m, deriv(sum(ts(http.response_times), le))))\n```\n\nThe sum function aggregates the http response times and groups them by the le tag. Since\nhttp.response_times has three buckets, the sum() function will graph three lines, one for each bucket.\nderiv() shows the per second rate of change in the three lines from sum. The mavg function averages\nthe rates of change of the three lines over the last 15 minutes. Since the rates of change are per\nsecond, if you multiply the average rate of change for a bucket by 900, you get the number of new\nhttp requests falling into that bucket in the last 15 minutes. Finally, cumulativePercentile\nuses the values of the `le` tags, which are http response times, and linear interpolation of the\nbucket counts to estimate the 95th percentile of http.response_times over the last 15 minutes.\n\n### Summary Conversion\n\nA summary is converted to multiple gauge metrics: one gauge for every quantile in the summary. 
A special \"quantile\" tag\ncontains a value between 0 and 1 indicating the quantile for which the value belongs.\n\n[beta]:https://github.com/open-telemetry/opentelemetry-collector#beta\n\n[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tencentcloudlogserviceexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for TencentCloud Log Service exporter.","markdownDescription":"# TencentCloud LogService Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: logs |\n| Distributions | [contrib] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis exporter supports sending OpenTelemetry log data to [LogService](https://cloud.tencent.com/product/cls).\n\n# Configuration options:\n\n- `region` (required): LogService's [Region](https://cloud.tencent.com/document/product/614/56473).\n- `logset` (required): LogService's LogSet ID.\n- `topic` (required): LogService's topic ID.\n- `secret_id` (optional): TencentCloud secret id.\n- `secret_key` (optional): TencentCloud secret key.\n\n# Example:\n## Simple Log Data\n\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n endpoint: \":4317\"\n\nexporters:\n tencentcloud_logservice:\n # LogService's Region, https://cloud.tencent.com/document/product/614/18940\n # set cls.{region}.tencentcloudapi.com, eg cls.ap-beijing.tencentcloudapi.com;\n region: \"ap-beijing\"\n # LogService's LogSet ID\n logset: \"demo-logset\"\n # LogService's Topic ID\n topic: \"demo-topic\"\n # TencentCloud secret id\n secret_id: \"demo-secret-id\"\n # TencentCloud secret key\n secret_key: \"demo-secret-key\"\n\nservice:\n pipelines:\n logs:\n receivers: [otlp]\n exporters: [tencentcloud_logservice]\n```\n\n# Changelog\n\n- 2021-11-10 Change configuration item **endpoint** to **region**, by @wgliang\n- 2021-11-01 Initial implementation by @wgliang in #5722","properties":{"logset":{"description":"LogService's LogSet Name","title":"logset","type":"string"},"region":{"description":"LogService's Region, https://cloud.tencent.com/document/product/614/18940\nfor TencentCloud Kubernetes(or CVM), set ap-{region}.cls.tencentyun.com, eg ap-beijing.cls.tencentyun.com;\n others set ap-{region}.cls.tencentcs.com, eg ap-beijing.cls.tencentcs.com","title":"region","type":"string"},"secret_id":{"description":"TencentCloud access key id","title":"secret_id","type":"string"},"secret_key":{"description":"TencentCloud access key secret","title":"secret_key","type":"string"},"topic":{"description":"LogService's Topic Name","title":"topic","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.exporter.zipkinexporter.Config":{"additionalProperties":false,"description":"Config defines configuration settings for the Zipkin exporter.","markdownDescription":"# Zipkin Exporter\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [core], [contrib], [observiq] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n\u003c!-- end autogenerated 
section --\u003e\n\nExports data to a [Zipkin](https://zipkin.io/) back-end.\nBy default, this exporter requires TLS and offers queued retry capabilities.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): URL to which the exporter is going to send Zipkin trace data. For example: `http://localhost:9411/api/v2/spans`.\n\nThe following settings are optional:\n\n- `format` (default = `json`): The format to send events in. Can be set to `json` or `proto`.\n- `default_service_name` (default = `\u003cmissing service name\u003e`): What to name\n services missing this information.\n\nTo use TLS, specify `https://` as the protocol scheme in the URL passed to the `endpoint` property.\nSee [Advanced Configuration](#advanced-configuration) for more TLS options.\n\nExample:\n\n```yaml\nexporters:\n zipkin/nontls:\n endpoint: \"http://some.url:9411/api/v2/spans\"\n format: proto\n default_service_name: unknown-service\n\n zipkin/withtls:\n endpoint: \"https://some.url:9411/api/v2/spans\"\n\n zipkin/tlsnoverify:\n endpoint: \"https://some.url:9411/api/v2/spans\"\n tls:\n insecure_skip_verify: true\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [HTTP client settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md#client-configuration)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"default_service_name":{"title":"default_service_name","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"format":{"title":"format","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value 
provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.asapauthextension.Config":{"additionalProperties":false,"markdownDescription":"# ASAP Client Authentication Extension\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis extension provides [Atlassian Service Authentication Protocol](https://s2sauth.bitbucket.io/) (ASAP) client \ncredentials for HTTP or gRPC based exporters. \n\n## Example Configuration\n\n```yaml\nextensions:\n asapclient:\n # The `kid` as specified by the asap specification.\n key_id: somekeyid\n # The `iss` as specified by the asap specification.\n issuer: someissuer\n # The `aud` as specified by the asap specification.\n audience:\n - someservice\n - someotherservice\n # The private key of the client, used to sign the token. For an example, see `testdata/config.yaml`.\n private_key: ${env:ASAP_PRIVATE_KEY}\n # The time until expiry of each given token. The token will be cached and then re-provisioned upon expiry. 
\n # For more info see the \"exp\" claim in the asap specification: https://s2sauth.bitbucket.io/spec/#access-token-generation\n ttl: 60s\n \nexporters:\n otlphttp/withauth:\n endpoint: http://localhost:9000\n auth:\n authenticator: asapclient\n\n otlp/withauth:\n endpoint: 0.0.0.0:5000\n ca_file: /tmp/certs/ca.pem\n auth:\n authenticator: asapclient \n```","properties":{"audience":{"items":{"type":"string"},"title":"audience","type":"array"},"issuer":{"title":"issuer","type":"string"},"key_id":{"title":"key_id","type":"string"},"private_key":{"title":"private_key","type":"string"},"ttl":{"title":"ttl","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.awsproxy.Config":{"additionalProperties":false,"description":"Config defines the configuration for an AWS X-Ray proxy.","markdownDescription":"# AWS Proxy\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [aws], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\nThe AWS proxy accepts requests without any authentication of AWS signatures applied and forwards them to the\nAWS API, applying authentication and signing. This allows applications to avoid needing AWS credentials to access\na service, instead configuring the AWS exporter and/or proxy in the OpenTelemetry collector and only providing the\ncollector with credentials.\n\n## Configuration\n\nExample:\n\n```yaml\nextensions:\n awsproxy:\n endpoint: 0.0.0.0:2000\n proxy_address: \"\"\n tls:\n insecure: false\n server_name_override: \"\"\n region: \"\"\n role_arn: \"\"\n aws_endpoint: \"\"\n local_mode: false\n```\n\n### endpoint (Optional)\nThe TCP address and port on which this proxy listens for requests.\n\nDefault: `0.0.0.0:2000`\n\n### proxy_address (Optional)\nDefines the proxy address that this extension forwards HTTP requests to the AWS backend through. If left unconfigured, requests will be sent directly.\nThis will generally be set to a NAT gateway when the collector is running on a network without public internet.\n\n### insecure (Optional)\nEnables or disables TLS certificate verification when this proxy forwards HTTP requests to the AWS backend. This sets the `InsecureSkipVerify` in the [TLSConfig](https://godoc.org/crypto/tls#Config). When setting to true, TLS is susceptible to man-in-the-middle attacks so it should be used only for testing.\n\nDefault: `false`\n\n### server_name_override (Optional)\nThis sets the `ServerName` in the [TLSConfig](https://godoc.org/crypto/tls#Config).\n\n### region (Optional)\nThe AWS region this proxy forwards requests to. When missing, we will try to retrieve this value through environment variables or optionally ECS/EC2 metadata endpoint (depends on `local_mode` below).\n\n### role_arn (Optional)\nThe IAM role used by this proxy when communicating with the AWS service. 
If non-empty, the receiver will attempt to call STS to retrieve temporary credentials, otherwise the standard AWS credential [lookup](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials) will be performed.\n\n### aws_endpoint (Optional)\nThe AWS service endpoint which this proxy forwards requests to. If not set, will default to the AWS X-Ray endpoint.","properties":{"aws_endpoint":{"description":"AWSEndpoint is the X-Ray service endpoint which the local\nTCP server forwards requests to.","title":"aws_endpoint","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"local_mode":{"description":"LocalMode determines whether the EC2 instance metadata endpoint\nwill be called or not. Set to `true` to skip EC2 instance\nmetadata check.","title":"local_mode","type":"boolean"},"proxy_address":{"description":"ProxyAddress defines the proxy address that the local TCP server\nforwards HTTP requests to AWS X-Ray backend through.","title":"proxy_address","type":"string"},"region":{"description":"Region is the AWS region the local TCP server forwards requests to.","title":"region","type":"string"},"role_arn":{"description":"RoleARN is the IAM role used by the local TCP server when\ncommunicating with the AWS X-Ray service.","title":"role_arn","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration when forwarding\ncalls to the AWS X-Ray backend.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.basicauthextension.ClientAuthSettings":{"additionalProperties":false,"properties":{"password":{"description":"Password holds the password to use for client authentication.","title":"password","type":"string"},"username":{"description":"Username holds the username to use for client authentication.","title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.basicauthextension.Config":{"additionalProperties":false,"properties":{"client_auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.basicauthextension.ClientAuthSettings","description":"ClientAuth settings","title":"client_auth"},"htpasswd":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.basicauthextension.HtpasswdSettings","description":"Htpasswd settings.","title":"htpasswd"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.basicauthextension.HtpasswdSettings":{"additionalProperties":false,"markdownDescription":"# Basic Authenticator\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: 
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis extension implements both `configauth.ServerAuthenticator` and `configauth.ClientAuthenticator` to authenticate clients and servers using Basic Authentication. The authenticator type has to be set to `basicauth`.\n\nWhen used as ServerAuthenticator, if the authentication is successful `client.Info.Auth` will expose the following attributes:\n\n- `username`: The username of the authenticated user.\n- `raw`: Raw base64 encoded credentials.\n\nThe configuration should specify only one instance of `basicauth` extension for either client or server authentication. \n\nThe following are the configuration options:\n\n- `htpasswd.file`: The path to the htpasswd file.\n- `htpasswd.inline`: The htpasswd file inline content.\n- `client_auth.username`: Username to use for client authentication.\n- `client_auth.password`: Password to use for client authentication.\n\nTo configure the extension as a server authenticator, either one of `htpasswd.file` or `htpasswd.inline` has to be set. If both are configured, `htpasswd.inline` credentials take precedence.\n\nTo configure the extension as a client authenticator, `client_auth` has to be set.\n\nIf both the options are configured, the extension will throw an error.\n## Configuration\n\n```yaml\nextensions:\n basicauth/server:\n htpasswd: \n file: .htpasswd\n inline: |\n ${env:BASIC_AUTH_USERNAME}:${env:BASIC_AUTH_PASSWORD}\n \n basicauth/client:\n client_auth: \n username: username\n password: password\n\nreceivers:\n otlp:\n protocols:\n http:\n auth:\n authenticator: basicauth/server\n\nprocessors:\n\nexporters:\n otlp:\n auth:\n authenticator: basicauth/client\n\nservice:\n extensions: [basicauth/server, basicauth/client]\n pipelines:\n traces:\n receivers: [otlp]\n processors: []\n exporters: [otlp]\n```","properties":{"file":{"description":"Path to the htpasswd file.","title":"file","type":"string"},"inline":{"description":"Inline contents of the htpasswd file.","title":"inline","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.bearertokenauthextension.Config":{"additionalProperties":false,"description":"Config specifies how the Per-RPC bearer token based authentication data should be obtained.","properties":{"filename":{"description":"Filename points to a file that contains the bearer token to use for every RPC.","title":"filename","type":"string"},"scheme":{"description":"Scheme specifies the auth-scheme for the token. 
Defaults to \"Bearer\"","title":"scheme","type":"string"},"token":{"description":"BearerToken specifies the bearer token to use for every RPC.","title":"token","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.headerssetterextension.Config":{"additionalProperties":false,"properties":{"headers":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.headerssetterextension.HeaderConfig"},"title":"headers","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.headerssetterextension.HeaderConfig":{"additionalProperties":false,"properties":{"action":{"title":"action","type":"string"},"from_context":{"title":"from_context","type":"string"},"key":{"title":"key","type":"string"},"value":{"title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.healthcheckextension.Config":{"additionalProperties":false,"description":"Config has the configuration for the extension enabling the health check extension, used to report the health status of the service.","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"check_collector_pipeline":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.healthcheckextension.checkCollectorPipelineSettings","description":"CheckCollectorPipeline contains the list of settings of collector pipeline health check","title":"check_collector_pipeline"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"path":{"description":"Path represents the path the health check service will serve.\nThe default path is \"/\".","title":"path","type":"string"},"response_body":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.healthcheckextension.ResponseBodySettings","description":"ResponseBody represents the body of the response returned by the health check service.\nThis overrides the default response that it would return.","title":"response_body"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.healthcheckextension.ResponseBodySettings":{"additionalProperties":false,"markdownDescription":"# Health Check\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| 
Stability | [beta] |\n| Distributions | [core], [contrib], [aws], [observiq], [redhat], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nHealth Check extension enables an HTTP url that can be probed to check the\nstatus of the OpenTelemetry Collector. This extension can be used as a\nliveness and/or readiness probe on Kubernetes.\n\nThere is an optional configuration `check_collector_pipeline` which allows\nusers to enable health check for the collector pipeline. This feature can\nmonitor the number of times that components failed to send data to the destinations.\nIt only supports monitoring exporter failures and will support receivers and\nprocessors in the future.\n\nThe following settings are required:\n\n- `endpoint` (default = 0.0.0.0:13133): Address to publish the health check status. For full list of `HTTPServerSettings` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp).\n- `path` (default = \"/\"): Specifies the path to be configured for the health check server.\n- `response_body` (default = \"\"): Specifies a static body that overrides the default response returned by the health check service. 
\n- `check_collector_pipeline:` (optional): Settings of collector pipeline health check\n - `enabled` (default = false): Whether enable collector pipeline check or not\n - `interval` (default = \"5m\"): Time interval to check the number of failures\n - `exporter_failure_threshold` (default = 5): The failure number threshold to mark\n containers as healthy.\n\nExample:\n\n```yaml\nextensions:\n health_check:\n health_check/1:\n endpoint: \"localhost:13\"\n tls:\n ca_file: \"/path/to/ca.crt\"\n cert_file: \"/path/to/cert.crt\"\n key_file: \"/path/to/key.key\"\n path: \"/health/status\"\n check_collector_pipeline:\n enabled: true\n interval: \"5m\"\n exporter_failure_threshold: 5\n```\n\nThe full list of settings exposed for this exporter is documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"healthy":{"description":"Healthy represents the body of the response returned when the collector is healthy.\nThe default value is \"\"","title":"healthy","type":"string"},"unhealthy":{"description":"Unhealthy represents the body of the response returned when the collector is unhealthy.\nThe default value is \"\"","title":"unhealthy","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.healthcheckextension.checkCollectorPipelineSettings":{"additionalProperties":false,"properties":{"enabled":{"title":"enabled","type":"boolean"},"exporter_failure_threshold":{"title":"exporter_failure_threshold","type":"integer"},"interval":{"title":"interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.httpforwarder.Config":{"additionalProperties":false,"description":"Config defines configuration for http forwarder extension.","markdownDescription":"# HTTP Forwarder Extension\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis extension accepts HTTP requests, optionally adds headers to them and forwards them.\nThe RequestURIs of the original requests are preserved by the extension.\n\n## Configuration\n\nThe following settings are required:\n\n- `egress`: HTTP config settings to use for forwarding requests.\n - `endpoint` (no default): The target to which requests should be forwarded to.\n\nThe following settings can be optionally configured:\n\n- `ingress`: HTTP config settings for HTTP server listening to requests.\n - `endpoint` (default = `0.0.0.0:6060`): The host to which requests should be forwarded to.\n- `egress`: HTTP config settings to use for forwarding requests.\n - `headers` (default = `nil`): Additional headers to be added to all requests passing through the extension.\n - `timeout` (default = `10s`): How long to wait for each request to complete.\n\n### Example\n\n```yaml\n http_forwarder:\n ingress:\n endpoint: localhost:7070\n egress:\n endpoint: http://target/\n headers:\n otel_http_forwarder: dev\n timeout: 5s\n```\n\nThe full list of settings exposed for this exporter are documented [here](config.go)\nwith detailed sample configurations 
[here](testdata/config.yaml).","properties":{"egress":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPClientSettings","description":"Egress holds config settings to use for forwarded requests.","title":"egress"},"ingress":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings","description":"Ingress holds config settings for HTTP server listening for requests.","title":"ingress"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.jaegerremotesampling.Config":{"additionalProperties":false,"description":"Config has the configuration for the extension enabling the health check extension, used to report the health status of the service.","markdownDescription":"# Jaeger's Remote Sampling extension\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha] |\n| Distributions | [contrib], [redhat], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis extension allows serving sampling strategies following the Jaeger's remote sampling API. This extension can be configured to proxy requests to a backing remote sampling server, which could potentially be a Jaeger Collector down the pipeline, or a static JSON file from the local file system.\n\nBy default, two listeners are made available:\n- `localhost:5778`, following the legacy remote sampling endpoint as defined by Jaeger\n- `localhost:14250`, following the gRPC remote sampling endpoint, also defined by Jaeger\n\nNote that the port `14250` will clash with the Jaeger Receiver. When both are used, it's recommended to change this extension to use another port.\n\nAlthough this extension is derived from Jaeger, it can be used by any clients who can consume this standard, such as the [OpenTelemetry Java SDK](https://github.com/open-telemetry/opentelemetry-java/tree/v1.9.1/sdk-extensions/jaeger-remote-sampler).\n\nAt this moment, the `reload_interval` option is only effective for the `file` source. In the future, this property will be used to control a local cache for a `remote` source.\n\nThe `file` source can be used to load files from the local file system or from remote HTTP/S sources. 
The `remote` source must be used with a gRPC server that provides a Jaeger remote sampling service.\n\n## Configuration\n\n```yaml\nextensions:\n jaegerremotesampling:\n source:\n remote:\n endpoint: jaeger-collector:14250\n jaegerremotesampling/1:\n source:\n reload_interval: 1s\n file: /etc/otelcol/sampling_strategies.json\n jaegerremotesampling/2:\n source:\n reload_interval: 1s\n file: http://jaeger.example.com/sampling_strategies.json\n```\n\nA sampling strategy file could look like:\n\n```json\n{\n \"service_strategies\": [\n {\n \"service\": \"foo\",\n \"type\": \"probabilistic\",\n \"param\": 0.8,\n \"operation_strategies\": [\n {\n \"operation\": \"op1\",\n \"type\": \"probabilistic\",\n \"param\": 0.2\n },\n {\n \"operation\": \"op2\",\n \"type\": \"probabilistic\",\n \"param\": 0.4\n }\n ]\n },\n {\n \"service\": \"bar\",\n \"type\": \"ratelimiting\",\n \"param\": 5\n }\n ],\n \"default_strategy\": {\n \"type\": \"probabilistic\",\n \"param\": 0.5,\n \"operation_strategies\": [\n {\n \"operation\": \"/health\",\n \"type\": \"probabilistic\",\n \"param\": 0.0\n },\n {\n \"operation\": \"/metrics\",\n \"type\": \"probabilistic\",\n \"param\": 0.0\n }\n ]\n }\n}\n```\nSource: https://www.jaegertracing.io/docs/1.28/sampling/#collector-sampling-configuration","properties":{"grpc":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCServerSettings","title":"grpc"},"http":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings","title":"http"},"source":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.jaegerremotesampling.Source","description":"Source configures the source for the strategies file. One of `remote` or `file` has to be specified.","title":"source"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.jaegerremotesampling.Source":{"additionalProperties":false,"properties":{"file":{"description":"File specifies a local file as the strategies source","title":"file","type":"string"},"reload_interval":{"description":"ReloadInterval determines the periodicity to refresh the strategies","title":"reload_interval","type":"string"},"remote":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCClientSettings","description":"Remote defines the remote location for the file","title":"remote"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.oauth2clientauthextension.Config":{"additionalProperties":false,"description":"Config stores the configuration for OAuth2 Client Credentials (2-legged OAuth2 flow) setup.","markdownDescription":"# Authenticator - OAuth2 Client Credentials\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\nThis extension provides OAuth2 Client Credentials flow authenticator for HTTP and gRPC based exporters. The extension\nfetches and refreshes the token after expiry automatically. 
For further details about OAuth2 Client Credentials flow (2-legged workflow)\nrefer to https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.\n\nThe authenticator type has to be set to `oauth2client`.\n\n## Configuration\n\n```yaml\nextensions:\n oauth2client:\n client_id: someclientid\n client_secret: someclientsecret\n endpoint_params:\n audience: someaudience\n token_url: https://example.com/oauth2/default/v1/token\n scopes: [\"api.metrics\"]\n # tls settings for the token client\n tls:\n insecure: true\n ca_file: /var/lib/mycert.pem\n cert_file: certfile\n key_file: keyfile\n # timeout for the token client\n timeout: 2s\n \nreceivers:\n hostmetrics:\n scrapers:\n memory:\n otlp:\n protocols:\n grpc:\n\nexporters:\n otlphttp/withauth:\n endpoint: http://localhost:9000\n auth:\n authenticator: oauth2client\n \n otlp/withauth:\n endpoint: 0.0.0.0:5000\n ca_file: /tmp/certs/ca.pem\n auth:\n authenticator: oauth2client\n\nservice:\n extensions: [oauth2client]\n pipelines:\n metrics:\n receivers: [hostmetrics]\n processors: []\n exporters: [otlphttp/withauth, otlp/withauth]\n```\n\nFollowing are the configuration fields:\n\n- [**token_url**](https://datatracker.ietf.org/doc/html/rfc6749#section-3.2) - The resource server's token endpoint URL.\n- [**client_id**](https://datatracker.ietf.org/doc/html/rfc6749#section-2.2) - The client identifier issued to the client.\n- [**client_secret**](https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1) - The secret string associated with above identifier.\n- [**endpoint_params**](https://github.com/golang/oauth2/blob/master/clientcredentials/clientcredentials.go#L44) - Additional parameters that are sent to the token endpoint.\n- [**scopes**](https://datatracker.ietf.org/doc/html/rfc6749#section-3.3) - **Optional** requested permissions associated with the client.\n- [**timeout**](https://golang.org/src/net/http/client.go#L90) - **Optional** specifies the timeout on the underlying client to authorization server for fetching the tokens (initial and while refreshing).\n This is optional and not setting this configuration implies there is no timeout on the client.\n\nFor more information on client side TLS settings, see [configtls README](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/configtls).","properties":{"client_id":{"description":"ClientID is the application's ID.\nSee https://datatracker.ietf.org/doc/html/rfc6749#section-2.2","title":"client_id","type":"string"},"client_secret":{"description":"ClientSecret is the application's secret.\nSee https://datatracker.ietf.org/doc/html/rfc6749#section-2.3.1","title":"client_secret","type":"string"},"endpoint_params":{"$ref":"#/$defs/net.url.Values","description":"EndpointParams specifies additional parameters for requests to the token endpoint.","title":"endpoint_params"},"scopes":{"description":"Scope specifies optional requested permissions.\nSee https://datatracker.ietf.org/doc/html/rfc6749#section-3.3","items":{"type":"string"},"title":"scopes","type":"array"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout` for the underneath client to authorization\nserver while fetching and refreshing tokens.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration for the underneath client to authorization server.","title":"tls"},"token_url":{"description":"TokenURL is the resource server's token endpoint\nURL. 
This is a constant specific to each server.\nSee https://datatracker.ietf.org/doc/html/rfc6749#section-3.2","title":"token_url","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.dockerobserver.Config":{"additionalProperties":false,"description":"Config defines configuration for docker observer","markdownDescription":"# Docker Observer Extension\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Docker observer extension is a [Receiver Creator](../../../receiver/receivercreator/README.md)-compatible \"watch observer\" that will detect and report\ncontainer endpoints discovered through the Docker API. Only containers that are in the state of `Running` and not `Paused` will emit endpoints.\nThis observer watches the Docker engine's stream of events to dynamically create, update, and remove endpoints as events are processed.\n\nRequires Docker API Version 1.22+.\n\nThe collector will need permissions to access the Docker Engine API, specifically it will need\nread access to the Docker socket (default `unix:///var/run/docker.sock`).\n\n\n## Example Config\n\n```yaml\nextensions:\n docker_observer:\n # url of the docker socket, default to unix:///var/run/docker.sock\n endpoint: my/path/to/docker.sock\n # list of container image names to exclude\n excluded_images: ['redis', 'another_image_name']\n # client API version, default to 1.22\n api_version: 1.24\n # max amount of time to wait for a response from Docker API , default to 5s\n timeout: 15s\n\nreceivers:\n receiver_creator:\n watch_observers: [docker_observer]\n receivers:\n nginx:\n rule: type == \"container\" and name matches \"nginx\" and port == 80\n config:\n endpoint: '`endpoint`/status'\n collection_interval: 10s\n```\n\n## Configuration\n\n### `endpoint`\n\nThe URL of the docker server.\n\ndefault: `unix:///var/run/docker.sock`\n\n### `timeout`\n\nThe maximum amount of time to wait for docker API responses.\n\ndefault: `5s`\n\n### `excluded_images`\n\nA list of filters whose matching images are to be excluded. Supports literals, globs, and regex.\n\ndefault: `[]`\n\n### `use_hostname_if_present`\n\nIf true, the `Config.Hostname` field (if present) of the docker\ncontainer will be used as the discovered host that is used to configure\nreceivers. If false or if no hostname is configured, the field\n`NetworkSettings.IPAddress` is used instead. These settings can be found\nin the output of the Docker API's [Container Inspect](https://docs.docker.com/engine/api/v1.41/#operation/ContainerInspect) json.\n\ndefault: `false`\n\n### `use_host_bindings`\n\nIf true, the observer will configure receivers for matching container endpoints\nusing the host bound ip and port. This is useful if containers exist that are not\naccessible to an instance of the collector running outside of the docker network stack.\n\ndefault: `false`\n\n### `ignore_non_host_bindings`\n\nIf true, the observer will ignore discovered container endpoints that are not bound\nto host ports. 
This is useful if containers exist that are not accessible\nto an instance of the collector running outside of the docker network stack.\n\ndefault: `false`\n\n### `cache_sync_interval`\n\nThe time to wait before resyncing the list of containers the observer maintains\nthrough the docker event listener example: `cache_sync_interval: \"20m\"`\n\ndefault: `60m`\n\n## Endpoint Variables\n\nThe following endpoint variables are exposed to the receiver creator to be used in discovery rules:\n\n| Variable | Type | Description |\n|----------|------|-------------|\n| name | string | Primary name of the container |\n| image | string | Name of the container image |\n| port | uint16 | Exposed port of the container |\n| alternate_port | uint16 | Exposed port accessed through redirection, such as a mapped port |\n| command | string | The command used to invoke the process of the container |\n| container_id | string | ID of the container |\n| host | string | Hostname or IP of the underlying host the container is running on |\n| transport | string | Transport protocol used by the endpoint (TCP or UDP) |\n| labels | map[string]string | User-specified metadata labels on the container |","properties":{"api_version":{"description":"Docker client API version. Default is 1.22","title":"api_version","type":"number"},"cache_sync_interval":{"description":"The time to wait before resyncing the list of containers the observer maintains\nthrough the docker event listener example: cache_sync_interval: \"20m\"\nDefault: \"60m\"","title":"cache_sync_interval","type":"string"},"endpoint":{"description":"The URL of the docker server. Default is \"unix:///var/run/docker.sock\"","title":"endpoint","type":"string"},"excluded_images":{"description":"A list of filters whose matching images are to be excluded. Supports literals, globs, and regex.","items":{"type":"string"},"title":"excluded_images","type":"array"},"ignore_non_host_bindings":{"description":"If true, the observer will ignore discovered container endpoints that are not bound\nto host ports. This is useful if containers exist that are not accessible\nto an instance of the agent running outside of the docker network stack.","title":"ignore_non_host_bindings","type":"boolean"},"timeout":{"description":"The maximum amount of time to wait for docker API responses. Default is 5s","title":"timeout","type":"string"},"use_host_bindings":{"description":"If true, the observer will configure receivers for matching container endpoints\nusing the host bound ip and port. This is useful if containers exist that are not\naccessible to an instance of the agent running outside of the docker network stack.\nIf UseHostnameIfPresent and this config are both enabled, this setting will take precedence.","title":"use_host_bindings","type":"boolean"},"use_hostname_if_present":{"description":"If true, the \"Config.Hostname\" field (if present) of the docker\ncontainer will be used as the discovered host that is used to configure\nreceivers. 
If false or if no hostname is configured, the field\n`NetworkSettings.IPAddress` is used instead.","title":"use_hostname_if_present","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.Config":{"additionalProperties":false,"markdownDescription":"# Amazon Elastic Container Service Observer\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [aws], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `ecsobserver` uses the ECS/EC2 API to discover prometheus scrape targets from all running tasks and filter them\nbased on service names, task definitions and container labels.\n\nNOTE: If you run collector as a sidecar, you should consider\nuse [ECS resource detector](../../../processor/resourcedetectionprocessor/README.md) instead. However, it does not have\nservice, EC2 instances etc. because it only queries local API.\n\n## Config\n\nThe configuration is based on\n[existing cloudwatch agent ECS discovery](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights-Prometheus-Setup-autodiscovery-ecs.html)\n. A full collector config looks like the following:\n\n```yaml\nextensions:\n ecs_observer:\n refresh_interval: 60s # format is https://golang.org/pkg/time/#ParseDuration\n cluster_name: 'Cluster-1' # cluster name need manual config\n cluster_region: 'us-west-2' # region can be configured directly or use AWS_REGION env var\n result_file: '/etc/ecs_sd_targets.yaml' # the directory for file must already exists\n services:\n - name_pattern: '^retail-.*$'\n docker_labels:\n - port_label: 'ECS_PROMETHEUS_EXPORTER_PORT'\n task_definitions:\n - job_name: 'task_def_1'\n metrics_path: '/metrics'\n metrics_ports:\n - 9113\n - 9090\n arn_pattern: '.*:task-definition/nginx:[0-9]+'\n\nreceivers:\n prometheus:\n config:\n scrape_configs:\n - job_name: \"ecs-task\"\n file_sd_configs:\n - files:\n - '/etc/ecs_sd_targets.yaml' # MUST match the file name in ecs_observer.result_file\n relabel_configs: # Relabel here because label with __ prefix will be dropped by receiver.\n - source_labels: [ __meta_ecs_cluster_name ] # ClusterName\n action: replace\n target_label: ClusterName\n - source_labels: [ __meta_ecs_service_name ] # ServiceName\n action: replace\n target_label: ServiceName\n - action: labelmap # Convert docker labels on container to metric labels\n regex: ^__meta_ecs_container_labels_(.+)$ # Capture the key using regex, e.g. __meta_ecs_container_labels_Java_EMF_Metrics -\u003e Java_EMF_Metrics\n replacement: '$$1'\n\nprocessors:\n batch:\n\n# Use awsemf for CloudWatch Container Insights Prometheus. 
The extension does not have requirement on exporter.\nexporters:\n awsemf:\n\nservice:\n pipelines:\n metrics:\n receivers: [ prometheus ]\n processors: [ batch ]\n exporters: [ awsemf ]\n extensions: [ ecs_observer ]\n```\n\n| Name | | Description |\n|------------------|-----------|---------------------------------------------------------------------------------------------------------------------|\n| cluster_name | Mandatory | target ECS cluster name for service discovery |\n| cluster_region | Mandatory | target ECS cluster's AWS region name |\n| refresh_interval | Optional | how often to look for changes in endpoints (default: 10s) |\n| result_file | Mandatory | path of YAML file to write scrape target results. NOTE: the observer always returns empty in initial implementation |\n| services | Optional | list of service name patterns [detail](#ecs-service-name-based-filter-configuration) |\n| task_definitions | Optional | list of task definition arn patterns [detail](#ecs-task-definition-based-filter-configuration) |\n| docker_labels | Optional | list of docker labels [detail](#docker-label-based-filter-configuration) |\n\n### Output configuration\n\n`result_file` specifies where to write the discovered targets. It MUST match the files defined in `file_sd_configs` for\nprometheus receiver. See [output format](#output-format) for the detailed format.\n\n### Filters configuration\n\nThere are three type of filters, and they share the following common optional properties.\n\n- `job_name`\n- `metrics_path`\n- `metrics_ports` an array of port number\n\nExample\n\n```yaml\necs_observer:\n job_name: 'ecs-sd-job'\n services:\n - name_pattern: ^retail-.*$\n container_name_pattern: ^java-api-v[12]$\n - name_pattern: game\n metrics_path: /v3/343\n job_name: guilty-spark\n task_definitions:\n - arn_pattern: '*memcached.*'\n - arn_pattern: '^proxy-.*$'\n metrics_ports:\n - 9113\n - 9090\n metrics_path: /internal/metrics\n docker_labels:\n - port_label: ECS_PROMETHEUS_EXPORTER_PORT\n - port_label: ECS_PROMETHEUS_EXPORTER_PORT_V2\n metrics_path_label: ECS_PROMETHEUS_EXPORTER_METRICS_PATH\n```\n\n#### ECS Service Name based filter Configuration\n\n| Name | | Description |\n|------------------------|-----------|----------------------------------------------------------------------------------------------------|\n| name_pattern | Mandatory | Regex pattern to match against ECS service name |\n| metrics_ports | Mandatory | container ports separated by semicolon. Only containers that expose these ports will be discovered |\n| container_name_pattern | Optional | ECS task container name regex pattern |\n\n#### ECS Task Definition based filter Configuration\n\n| Name | | Description |\n|------------------------|-----------|----------------------------------------------------------------------------------------------------|\n| arn_pattern | Mandatory | Regex pattern to match against ECS task definition ARN |\n| metrics_ports | Mandatory | container ports separated by semicolon. 
Only containers that expose these ports will be discovered |\n| container_name_pattern | Optional | ECS task container name regex pattern |\n\n#### Docker Label based filter Configuration\n\nSpecify the label keys to look up values.\n\n| Name | | Description |\n|--------------------|-----------|---------------------------------------------------------------------------------|\n| port_label | Mandatory | container's docker label name that specifies the metrics port |\n| metrics_path_label | Optional | container's docker label name that specifies the metrics path. (Default: \"\") |\n| job_name_label | Optional | container's docker label name that specifies the scrape job name. (Default: \"\") |\n\n### Authentication\n\nIt uses the default credential chain; on ECS it is advised to\nuse an [ECS task role](https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-iam-roles.html). You need to deploy the\ncollector as an ECS task/service with\nthe [following permissions](https://docs.amazonaws.cn/en_us/AmazonCloudWatch/latest/monitoring/ContainerInsights-Prometheus-install-ECS.html#ContainerInsights-Prometheus-Setup-ECS-IAM).\n\n**EC2** access is required for getting the private IP for ECS on EC2. However, the EC2 permission can be removed if you are only\nusing Fargate because the task IP comes from awsvpc instead of the host.\n\n```text\nec2:DescribeInstances\necs:ListTasks\necs:ListServices\necs:DescribeContainerInstances\necs:DescribeServices\necs:DescribeTasks\necs:DescribeTaskDefinition\n```\n\n## Design\n\n- [Discovery](#discovery-mechanism)\n- [Notify receiver](#notify-prometheus-receiver-of-discovered-targets)\n- [Output format](#output-format)\n\n### Discovery mechanism\n\nThe extension polls the ECS API periodically to get all running tasks and filter them down to scrape targets. There are 3 types of\nfilters for discovering targets; targets that match a filter are kept. Targets from different filters are merged based\non `address/metrics_path` before updating/creating the receiver.\n\n#### ECS Service Name based filter\n\nECS [Service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) is a deployment that\nmanages multiple tasks with the same [definition](#ecs-task-definition-based-filter) (like Deployment and DaemonSet in k8s).\n\nThe `service`\nconfiguration matches both service name and container name (if not empty).\n\nNOTE: the name of the service is **added** as a label value with key `ServiceName`.\n\n```yaml\n# Example 1: Matches all containers that are started by the retail-* service\nname_pattern: ^retail-.*$\n---\n# Example 2: Matches all containers with name java-api in the cash-app service\nname_pattern: ^cash-app$\ncontainer_name_pattern: ^java-api$\n---\n# Example 3: Override default metrics_path (i.e. /metrics)\nname_pattern: ^log-replay-worker$\nmetrics_path: /v3/metrics\n```\n\n#### ECS Task Definition based filter\n\nECS [task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) contains one or\nmore containers (like Pod in k8s). Long running applications normally use [service](#ecs-service-name-based-filter),\nwhile short running (batch) jobs can\nbe [created from task definitions directly](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html).\n\nThe `task` definition matches both task definition name and container name (if not empty). 
Optional config\nlike `metrics_path`, `metrics_ports`, `job_name` can override the default values.\n\n```yaml\n# Example 1: Matches all the tasks created from a task definition that contains memcached in its arn\narn_pattern: \"*memcached.*\"\n```\n\n#### Docker Label based filter\n\nDocker labels can be specified in the task definition. Only `port_label` is used when checking if the container should be\nincluded. Optional config like `metrics_path_label`, `job_name_label` can override the default values.\n\n```yaml\n# Example 1: Matches all containers that have the label ECS_PROMETHEUS_EXPORTER_PORT_NGINX\nport_label: 'ECS_PROMETHEUS_EXPORTER_PORT_NGINX'\n---\n# Example 2: Override job name based on label MY_APP_JOB_NAME\nport_label: 'ECS_PROMETHEUS_EXPORTER_PORT_MY_APP'\njob_name_label: 'MY_APP_JOB_NAME'\n```\n\n### Notify Prometheus Receiver of discovered targets\n\nThere are three ways to notify a receiver:\n\n- Use [file based service discovery](#generate-target-file-for-file-based-discovery) in the prometheus config and update\n the file.\n- Use the [receiver creator framework](#receiver-creator-framework) to create a new receiver for new endpoints.\n- Register as a prometheus discovery plugin.\n\n#### Generate target file for file based discovery\n\n- Status: implemented\n\nThis is the current approach used by cloudwatch-agent and\nis also [recommended by prometheus](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config).\nIt's easier to debug; the main drawback is that it only works for prometheus. Another minor issue is that fsnotify may occasionally not\nwork properly and delay the update.\n\n#### Receiver creator framework\n\n- Status: pending\n\nThis is a generic approach that creates a new receiver at runtime based on discovered endpoints. The main problem is\na performance issue, as described\nin [this issue](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/1395).\n\n#### Register as prometheus discovery plugin\n\n- Status: pending\n\nBecause both the collector and prometheus are written in Go, we can call `discover.RegisterConfig` to make it a valid\nconfig for prometheus (like other in-tree plugins such as kubernetes). The drawback is that the configuration now lives under\nprometheus instead of the extension, which can cause confusion.\n\n## Output Format\n\n[Example in unit test](testdata/ut_targets.expected.yaml).\n\nThe format is based\non [cloudwatch agent](https://github.com/aws/amazon-cloudwatch-agent/tree/master/internal/ecsservicediscovery#example-result),\n[ec2 sd](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config)\nand [kubernetes sd](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).\nTask and labels from the task definition are always included. EC2 info is only included when the task is running on ECS EC2\n(i.e. not on [Fargate](https://aws.amazon.com/fargate/)).\n\nUnlike cloudwatch agent, all the [additional labels](#additional-labels) start with the `__meta_ecs_` prefix. 
If they are\nnot renamed during [relabel](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config),\nthey will all get dropped in prometheus receiver and won't pass down along the pipeline.\n\nThe number of dimensions supported by [AWS EMF exporter](../../../exporter/awsemfexporter) is limited by its backend.\nThe labels can be modified/filtered at different stages, prometheus receiver\nrelabel, [Metrics Transform Processor](../../../processor/metricstransformprocessor)\nand [EMF exporter Metric Declaration](../../../exporter/awsemfexporter/README.md#metric_declaration)\n\n### Essential Labels\n\nRequired for prometheus to scrape the target.\n\n| Label Name | Source | Type | Description |\n|---------------------|------------------------------|--------|---------------------------------------------------------------------------|\n| `__address__` | ECS Task and TaskDefinition | string | `host:port` `host` is private ip from ECS Task, `port` is the mapped port |\n| ` __metrics_path__` | ECS TaskDefinition or Config | string | Default is `/metrics`, changes based on config/label |\n| `job` | ECS TaskDefinition or Config | string | Name for scrape job |\n\n### Additional Labels\n\nAdditional information from ECS and EC2.\n\n| Label Name | Source | Type | Description |\n|----------------------------------------------|--------------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `__meta_ecs_task_definition_family` | ECS TaskDefinition | string | Name for registered task definition |\n| `__meta_ecs_task_definition_revision` | ECS TaskDefinition | int | Version of the task definition being used to run the task |\n| `__meta_ecs_task_launch_type` | ECS Task | string | `EC2` or `FARGATE` |\n| `__meta_ecs_task_group` | ECS Task | string | [Task Group](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#task-groups) is `service:my-service-name` or specified when launching task directly |\n| `__meta_ecs_task_tags_\u003ctagkey\u003e` | ECS Task | string | Tags specified in [CreateService](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) and [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) |\n| `__meta_ecs_task_container_name` | ECS Task | string | Name of container |\n| `__meta_ecs_task_container_label_\u003clabelkey\u003e` | ECS TaskDefinition | string | Docker label specified in task definition |\n| `__meta_ecs_task_health_status` | ECS Task | string | `HEALTHY` or `UNHEALTHY`. `UNKNOWN` if not configured |\n| `__meta_ecs_ec2_instance_id` | EC2 | string | EC2 instance id for `EC2` launch type |\n| `__meta_ecs_ec2_instance_type` | EC2 | string | EC2 instance type e.g. `t3.medium`, `m6g.xlarge` |\n| `__meta_ecs_ec2_tags_\u003ctagkey\u003e` | EC2 | string | Tags specified when creating the EC2 instance |\n| `__meta_ecs_ec2_vpc_id` | EC2 | string | ID of VPC e.g. `vpc-abcdefeg` |\n| `__meta_ecs_ec2_private_ip` | EC2 | string | Private IP |\n| `__meta_ecs_ec2_public_ip` | EC2 | string | Public IP, if available |\n\n### Serialization\n\n- Labels, all the label value are encoded as string. (e.g. strconv.Itoa(123)).\n- Go struct, all the non string types are converted. 
labels and tags are passed as `map[string]string`\n instead of `[]KeyValue`\n- Prometheus target, each `target` is represented by the `PrometheusECSTarget` struct shown below.\n\n```go\n// PrometheusECSTarget contains address and labels extracted from a running ECS task\n// and its underlying EC2 instance (if available).\n//\n// For serialization\n// - FromLabels and ToLabels convert it between map[string]string.\n// - FromTargetYAML and ToTargetYAML convert it between prometheus file discovery format in YAML.\n// - FromTargetJSON and ToTargetJSON convert it between prometheus file discovery format in JSON.\ntype PrometheusECSTarget struct {\n\tAddress string `json:\"address\"`\n\tMetricsPath string `json:\"metrics_path\"`\n\tJob string `json:\"job\"`\n\tTaskDefinitionFamily string `json:\"task_definition_family\"`\n\tTaskDefinitionRevision int `json:\"task_definition_revision\"`\n\tTaskLaunchType string `json:\"task_launch_type\"`\n\tTaskGroup string `json:\"task_group\"`\n\tTaskTags map[string]string `json:\"task_tags\"`\n\tContainerName string `json:\"container_name\"`\n\tContainerLabels map[string]string `json:\"container_labels\"`\n\tHealthStatus string `json:\"health_status\"`\n\tEC2InstanceId string `json:\"ec2_instance_id\"`\n\tEC2InstanceType string `json:\"ec2_instance_type\"`\n\tEC2Tags map[string]string `json:\"ec2_tags\"`\n\tEC2VPCId string `json:\"ec2_vpc_id\"`\n\tEC2PrivateIP string `json:\"ec2_private_ip\"`\n\tEC2PublicIP string `json:\"ec2_public_ip\"`\n}\n```\n\n### Delta\n\nDelta is **not** supported because there is no watch API in ECS (unlike k8s, see [known issues](#known-issues)). The\noutput always contains all the targets. Callers/consumers need to implement their own logic to calculate the targets diff\nif they only want to process new targets.\n\n## Known issues\n\n- There is no list watch API in ECS (unlike k8s), and we fetch ALL the tasks and filter them locally. If the poll interval\n is too short or there are multiple instances doing discovery, you may hit the (undocumented) API rate limit. In-memory\n caching is implemented to reduce calls for task definitions and ec2.\n- A single collector may not be able to handle a large cluster; you can use `hashmod`\n in [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) to do\n static sharding. However, too many collectors may trigger the rate limit on the AWS API as each shard is fetching ALL the\n tasks during discovery regardless of the number of shards.\n\n## Implementation\n\nThe implementation has two parts: the core ecs service discovery logic and an adapter for notifying discovery results.\n\n### Packages\n\n- `extension/observer/ecsobserver` main logic\n- [internal/ecsmock](internal/ecsmock) mock ECS cluster\n- [internal/errctx](internal/errctx) structured error wrapping\n\n### Flow\n\nThe pseudocode below shows the overall flow.\n\n```\nNewECSSD() {\n session := awsconfig.NewSession()\n ecsClient := awsecs.NewClient(session)\n filters := config.NewFilters()\n decorator := awsec2.NewClient(session)\n for {\n select {\n case \u003c- timer:\n // Fetch ALL\n tasks := ecsClient.FetchAll()\n // Filter\n filteredTasks := filters.Apply(tasks)\n // Add EC2 info\n decorator.Apply(filteredTasks)\n // Generate output\n if writeResultFile {\n writeFile(filteredTasks, /etc/ecs_sd.yaml)\n } else {\n notifyObserver()\n }\n }\n }\n}\n```\n\n### Metrics\n\nThe following metrics are logged at debug level. 
TODO(pingleig): Is there a way for otel plugins to export custom metrics to\notel's own /metrics?\n\n| Name | Type | Description |\n|--------------------------------------|------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `discovered_targets` | int | Number of targets exported |\n| `discovered_tasks` | int | Number of tasks that contain scrape targets; should be smaller than targets unless each task only contains one target |\n| `ignored_tasks` | int | Tasks ignored by filters; `discovered_tasks` and `ignored_tasks` should add up to `api_ecs_list_task_results`, the one exception being when API paging fails in the middle |\n| `targets_matched_by_service` | int | Targets matched by the ECS Service name based filter |\n| `targets_matched_by_task_definition` | int | Targets matched by the ECS TaskDefinition based filter |\n| `targets_matched_by_docker_label` | int | Targets matched by the ECS DockerLabel based filter |\n| `target_error_noip` | int | Export failures because the private IP was not found |\n| `api_ecs_list_task_results` | int | Total number of tasks returned from the ECS ListTask API |\n| `api_ecs_list_service_results` | int | Total number of services returned from the ECS ListService API |\n| `api_error_auth` | int | Total number of errors triggered by permission issues |\n| `api_error_rate_limit` | int | Total number of errors triggered by rate limiting |\n| `cache_size_container_instances` | int | Cached ECS ContainerInstances |\n| `cache_hit_container_instance` | int | Cache hits during the latest polling |\n| `cache_size_ec2_instance` | int | Cached EC2 Instances |\n| `cache_hit_ec2_instance` | int | Cache hits during the latest polling |\n\n### Error Handling\n\n- Auth and cluster-not-found errors will cause the extension to stop (calling `host.ReportFatalError`). Although the IAM role\n can be updated at runtime without restarting the collector, it's better to fail fast and make the problem obvious. The same\n applies to cluster not found. 
In the future we can add config to downgrade those errors if users want to monitor an ECS\n cluster with a collector running outside the cluster; the collector can run anywhere as long as it can reach the scrape\n targets and the AWS API.\n- If we hit a non-critical error, we overwrite the existing file with whatever targets we have; we might not have all the\n targets due to throttling etc.\n\n### Unit Test\n\nA mock ECS and EC2 server is in [internal/ecsmock](internal/ecsmock); see [fetcher_test](fetcher_test.go) for its usage.\n\n### Integration Test\n\nIntegration tests will be implemented in the [AOT Testing Framework](https://github.com/aws-observability/aws-otel-test-framework) to run\nagainst an actual ECS service on both EC2 and Fargate.\n\n## Changelog\n\n- 2021-06-02 first version that actually works on ECS by @pingleig, thanks @anuraaga @Aneurysm9 @jrcamp @mxiamxia for\n reviewing (all the PRs ...)\n- 2021-02-24 Updated doc by @pingleig\n- 2020-12-29 Initial implementation by [Raphael](https://github.com/theRoughCode)\n in [#1920](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/1920)","properties":{"cluster_name":{"description":"ClusterName is the target ECS cluster name for service discovery.","title":"cluster_name","type":"string"},"cluster_region":{"description":"ClusterRegion is the target ECS cluster's AWS region.","title":"cluster_region","type":"string"},"docker_labels":{"description":"DockerLabels is a list of docker labels for filtering containers within tasks.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.DockerLabelConfig"},"title":"docker_labels","type":"array"},"job_label_name":{"description":"JobLabelName is the override for the prometheus job label; using the `job` literal will cause an error\nin the otel prometheus receiver. 
See https://github.com/open-telemetry/opentelemetry-collector/issues/575","title":"job_label_name","type":"string"},"refresh_interval":{"description":"RefreshInterval determines the frequency at which the observer\nneeds to poll for collecting information about new processes.","title":"refresh_interval","type":"string"},"result_file":{"description":"ResultFile is the output path of the discovered targets YAML file (optional).\nThis is mainly used in conjunction with the Prometheus receiver.","title":"result_file","type":"string"},"services":{"description":"Services is a list of service name patterns for filtering tasks.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.ServiceConfig"},"title":"services","type":"array"},"task_definitions":{"description":"TaskDefinitions is a list of task definition arn patterns for filtering tasks.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.TaskDefinitionConfig"},"title":"task_definitions","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.DockerLabelConfig":{"additionalProperties":false,"description":"DockerLabelConfig matches all tasks based on their docker label.","properties":{"job_name":{"title":"job_name","type":"string"},"job_name_label":{"title":"job_name_label","type":"string"},"metrics_path":{"title":"metrics_path","type":"string"},"metrics_path_label":{"title":"metrics_path_label","type":"string"},"metrics_ports":{"items":{"type":"integer"},"title":"metrics_ports","type":"array"},"port_label":{"description":"PortLabel is mandatory; an empty string means docker label based match is skipped.","title":"port_label","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.ServiceConfig":{"additionalProperties":false,"properties":{"container_name_pattern":{"description":"ContainerNamePattern is optional; an empty string means all containers in that service would be exported.\nOtherwise both service and container name patterns need to match.","title":"container_name_pattern","type":"string"},"job_name":{"title":"job_name","type":"string"},"metrics_path":{"title":"metrics_path","type":"string"},"metrics_ports":{"items":{"type":"integer"},"title":"metrics_ports","type":"array"},"name_pattern":{"description":"NamePattern is mandatory.","title":"name_pattern","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.TaskDefinitionConfig":{"additionalProperties":false,"properties":{"arn_pattern":{"description":"ArnPattern is mandatory; an empty string means arn based match is skipped.","title":"arn_pattern","type":"string"},"container_name_pattern":{"description":"ContainerNamePattern is optional; an empty string means all containers in that task definition would be exported.\nOtherwise both service and container name patterns need to match.","title":"container_name_pattern","type":"string"},"job_name":{"title":"job_name","type":"string"},"metrics_path":{"title":"metrics_path","type":"string"},"metrics_ports":{"items":{"type":"integer"},"title":"metrics_ports","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecstaskobserver.Config":{"additionalProperties":false,"markdownDescription":"# ECS Task Observer\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| 
------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `ecs_task_observer` is a [Receiver Creator](../../../receiver/receivercreator/README.md)-compatible \"watch observer\" that will detect and report\ncontainer endpoints for the running ECS task of which your Collector instance is a member. It is designed for and only supports \"sidecar\" deployments\nto detect co-located containers. For cluster wide use cases you should use the [ECS Observer](../ecsobserver/README.md) with a corresponding Prometheus receiver.\n\nThe Observer works by querying the available [task metadata endpoint](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html)\nand making all detected running containers available as endpoints for Receiver Creator usage. Because container metadata don't include any port mapping information,\nyou must include service-specific port `dockerLabels` in your task definition container entries. A docker label of `ECS_TASK_OBSERVER_PORT` with a valid port\nvalue will be attempted to be parsed for each reported container by default.\n\n**An instance of the Collector must be running in the ECS task from which you want to detect containers.**\n\n\u003e :construction: This extension is in alpha and configuration fields are subject to change.\n\n## Example Config\n\n```yaml\nextensions:\n ecs_task_observer:\n # the task metadata endpoint. If not set, detected by first of ECS_CONTAINER_METADATA_URI_V4 and ECS_CONTAINER_METADATA_URI\n # environment variables by default.\n endpoint: http://my.task.metadata.endpoint\n # the dockerLabels to use to try to extract target application ports. 
If not set \"ECS_TASK_OBSERVER_PORT\" will be used by default.\n port_labels: [A_DOCKER_LABEL_CONTAINING_DESIRED_PORT, ANOTHER_DOCKER_LABEL_CONTAINING_DESIRED_PORT]\n refresh_interval: 10s\n\nreceivers:\n receiver_creator:\n receivers:\n redis:\n rule: type == \"container\" \u0026\u0026 name matches \"redis\"\n config:\n password: `container.labels[\"SECRET\"]`\n watch_observers: [ecs_task_observer]\n```\n\nThe above config defines a custom task metadata endpoint and provides two port labels that will be used to set the resulting container endpoint's `port`.\nA corresponding redis container definition could look like the following:\n\n```json\n{\n \"containerDefinitions\": [\n {\n \"portMappings\": [\n {\n \"containerPort\": 6379,\n \"hostPort\": 6379\n }\n ],\n \"image\": \"redis\",\n \"dockerLabels\": {\n \"A_DOCKER_LABEL_CONTAINING_DESIRED_PORT\": \"6379\",\n \"SECRET\": \"my-redis-auth\"\n },\n \"name\": \"redis\"\n }\n ]\n}\n```\n\n\n### Config\n\nAs a rest client-utilizing extension, most of the ECS Task Observer's configuration is inherited from the Collector core\n[HTTP Client Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md#client-configuration).\n\nAll fields are optional.\n\n| Name | Type | Default | Docs |\n| ---- | ---- | ------- | ---- |\n| endpoint |string| \u003cno value\u003e | The task metadata endpoint, detected from first of `ECS_CONTAINER_METADATA_URI_V4` and `ECS_CONTAINER_METADATA_URI` environment variables by default |\n| tls |[configtls-TLSClientSetting](#configtls-tlsclientsetting)| \u003cno value\u003e | TLSSetting struct exposes TLS client configuration. |\n| read_buffer_size |int| \u003cno value\u003e | ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize. |\n| write_buffer_size |int| \u003cno value\u003e | WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize. |\n| timeout |[time-Duration](#time-duration)| \u003cno value\u003e | Timeout parameter configures `http.Client.Timeout`. |\n| headers |map[string]string| \u003cno value\u003e | Additional headers attached to each HTTP request sent by the client. Existing header values are overwritten if collision happens. |\n| customroundtripper |func(http.RoundTripper) (http.RoundTripper, error)| \u003cno value\u003e | Custom Round Tripper to allow for individual components to intercept HTTP requests |\n| auth |[Authentication]| \u003cno value\u003e | Auth configuration for outgoing HTTP calls. |\n| refresh_interval |[time-Duration](#time-duration)| 30s | RefreshInterval determines the frequency at which the observer needs to poll for collecting new information about task containers. |\n| port_labels |[]string| `[ECS_TASK_OBSERVER_PORT]` | PortLabels is a list of container Docker labels from which to obtain the observed Endpoint port. The first label with valid port found will be used. If no PortLabels provided, default of ECS_TASK_OBSERVER_PORT will be used. |\n\n### configtls-TLSClientSetting\n\n| Name | Type | Default | Docs |\n| ---- | ---- | ------- | ---- |\n| ca_file |string| \u003cno value\u003e | Path to the CA cert. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA. (optional) |\n| cert_file |string| \u003cno value\u003e | Path to the TLS cert to use for TLS required connections. (optional) |\n| key_file |string| \u003cno value\u003e | Path to the TLS key to use for TLS required connections. 
(optional) |\n| min_version |string| \u003cno value\u003e | MinVersion sets the minimum TLS version that is acceptable. If not set, TLS 1.0 is used. (optional) |\n| max_version |string| \u003cno value\u003e | MaxVersion sets the maximum TLS version that is acceptable. If not set, TLS 1.3 is used. (optional) |\n| insecure |bool| \u003cno value\u003e | In gRPC when set to true, this is used to disable the client transport security. See https://godoc.org/google.golang.org/grpc#WithInsecure. In HTTP, this disables verifying the server's certificate chain and host name (InsecureSkipVerify in the tls Config). Please refer to https://godoc.org/crypto/tls#Config for more information. (optional, default false) |\n| insecure_skip_verify |bool| \u003cno value\u003e | InsecureSkipVerify will enable TLS but not verify the certificate. |\n| server_name_override |string| \u003cno value\u003e | ServerName requested by client for virtual hosting. This sets the ServerName in the TLSConfig. Please refer to https://godoc.org/crypto/tls#Config for more information. (optional) |\n\n### time-Duration\nAn optionally signed sequence of decimal numbers, each with a unit suffix, such as `300ms`, `-1.5h`, or `2h45m`. Valid time units are `ns`, `us`, `ms`, `s`, `m`, `h`.\n\n[Authentication]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/configauth","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"port_labels":{"description":"PortLabels is a list of container Docker labels from which to obtain the observed Endpoint port.\nThe first label 
with valid port found will be used. If no PortLabels provided, default of\nECS_TASK_OBSERVER_PORT will be used.","items":{"type":"string"},"title":"port_labels","type":"array"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"refresh_interval":{"description":"RefreshInterval determines the frequency at which the observer\nneeds to poll for collecting new information about task containers.","title":"refresh_interval","type":"string"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.hostobserver.Config":{"additionalProperties":false,"description":"Config defines configuration for host observer.","markdownDescription":"# Host Observer Extension\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `host_observer` looks at the current host for listening network endpoints.\n\nIt will look for all listening sockets on TCP and UDP over IPv4 and IPv6.\n\nIt uses the /proc filesystem and requires the SYS_PTRACE and DAC_READ_SEARCH capabilities so that it can determine what processes own the listening sockets.\n\n### Configuration\n\n#### `refresh_interval`\n\nDetermines how often to look for changes in endpoints.\n\ndefault: `10s`\n\n### Endpoint Variables\n\nEndpoint variables exposed by this observer are as follows.\n\n| Variable | Description |\n|-----------|--------------------------------------------------------------------------------------------|\n| type | `\"port\"` |\n| name | name of the process associated to the port |\n| port | port number |\n| command | full command used to invoke this process, including the executable itself at the beginning |\n| is_ipv6 | `true` if the endpoint is IPv6 |\n| transport | \"TCP\" or \"UDP\" |","properties":{"refresh_interval":{"description":"RefreshInterval determines how frequency at which the observer\nneeds to poll for collecting information about new processes.","title":"refresh_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.k8sobserver.Config":{"additionalProperties":false,"description":"Config defines configuration for k8s attributes processor.","markdownDescription":"# Kubernetes Observer\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha] |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: 
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `k8s_observer` is a [Receiver Creator](../../../receiver/receivercreator/README.md)-compatible \"watch observer\" that will detect and report\nKubernetes pod, port, and node endpoints via the Kubernetes API.\n\n## Example Config\n\n```yaml\nextensions:\n k8s_observer:\n auth_type: serviceAccount\n node: ${env:K8S_NODE_NAME}\n observe_pods: true\n observe_nodes: true\n\nreceivers:\n receiver_creator:\n watch_observers: [k8s_observer]\n receivers:\n redis:\n rule: type == \"port\" \u0026\u0026 pod.name matches \"redis\"\n config:\n password: '`pod.labels[\"SECRET\"]`'\n kubeletstats:\n rule: type == \"k8s.node\"\n config:\n auth_type: serviceAccount\n collection_interval: 10s\n endpoint: \"`endpoint`:`kubelet_endpoint_port`\"\n extra_metadata_labels:\n - container.id\n metric_groups:\n - container\n - pod\n - node\n```\n\nThe `node` field can be set to the node name to limit discovered endpoints. For example, its name value can be obtained using the downward API inside a Collector pod spec as follows:\n\n```yaml\nenv:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n```\n\nThis spec-determined value would then be available via the `${env:K8S_NODE_NAME}` usage in the observer configuration.\n\n## Config\n\nAll fields are optional.\n\n| Name | Type | Default | Docs |\n| ---- | ---- | ------- | ---- |\n| auth_type | string | `serviceAccount` | How to authenticate to the K8s API server. This can be one of `none` (for no auth), `serviceAccount` (to use the standard service account token provided to the agent pod), or `kubeConfig` to use credentials from `~/.kube/config`. |\n| node | string | \u003cno value\u003e | The node name to limit the discovery of pod, port, and node endpoints. Providing no value (the default) results in discovering endpoints for all available nodes. |\n| observe_pods | bool | `true` | Whether to report observer pod and port endpoints. If `true` and `node` is specified it will only discover pod and port endpoints whose `spec.nodeName` matches the provided node name. If `true` and `node` isn't specified, it will discover all available pod and port endpoints. Please note that Collector connectivity to pods from other nodes is dependent on your cluster configuration and isn't guaranteed. | \n| observe_nodes | bool | `false` | Whether to report observer k8s.node endpoints. If `true` and `node` is specified it will only discover node endpoints whose `metadata.name` matches the provided node name. If `true` and `node` isn't specified, it will discover all available node endpoints. Please note that Collector connectivity to nodes is dependent on your cluster configuration and isn't guaranteed.|","properties":{"auth_type":{"description":"How to authenticate to the K8s API server. 
This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"},"node":{"description":"Node is the node name to limit the discovery of pod, port, and node endpoints.\nProviding no value (the default) results in discovering endpoints for all available nodes.\nFor example, node name can be set using the downward API inside the collector\npod spec as follows:\n\nenv:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n\nThen set this value to ${env:K8S_NODE_NAME} in the configuration.","title":"node","type":"string"},"observe_nodes":{"description":"ObserveNodes determines whether to report observer k8s.node endpoints. If `true` and Node is specified\nit will only discover node endpoints whose `metadata.name` matches the provided node name. If `true` and\nNode isn't specified, it will discover all available node endpoints. `false` by default.","title":"observe_nodes","type":"boolean"},"observe_pods":{"description":"ObservePods determines whether to report observer pod and port endpoints. If `true` and Node is specified\nit will only discover pod and port endpoints whose `spec.nodeName` matches the provided node name. If `true` and\nNode isn't specified, it will discover all available pod and port endpoints. `true` by default.","title":"observe_pods","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.oidcauthextension.Config":{"additionalProperties":false,"description":"Config has the configuration for the OIDC Authenticator extension.","markdownDescription":"# Authenticator - OIDC\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis extension implements a `configauth.ServerAuthenticator`, to be used in receivers inside the `auth` settings. The authenticator type has to be set to `oidc`.\n\n## Configuration\n\n```yaml\nextensions:\n oidc:\n issuer_url: http://localhost:8080/auth/realms/opentelemetry\n issuer_ca_path: /etc/pki/tls/cert.pem\n audience: account\n username_claim: email\n\nreceivers:\n otlp:\n protocols:\n grpc:\n auth:\n authenticator: oidc\n\nprocessors:\n\nexporters:\n logging:\n logLevel: debug\n\nservice:\n extensions: [oidc]\n pipelines:\n traces:\n receivers: [otlp]\n processors: []\n exporters: [logging]\n```","properties":{"attribute":{"description":"The attribute (header name) to look for auth data. 
Optional, default value: \"authorization\".","title":"attribute","type":"string"},"audience":{"description":"Audience of the token, used during the verification.\nFor example: \"https://accounts.google.com\" or \"https://login.salesforce.com\".\nRequired.","title":"audience","type":"string"},"groups_claim":{"description":"The claim that holds the subject's group membership information.\nOptional.","title":"groups_claim","type":"string"},"issuer_ca_path":{"description":"The local path for the issuer CA's TLS server cert.\nOptional.","title":"issuer_ca_path","type":"string"},"issuer_url":{"description":"IssuerURL is the base URL for the OIDC provider.\nRequired.","title":"issuer_url","type":"string"},"username_claim":{"description":"The claim to use as the username, in case the token's 'sub' isn't the suitable source.\nOptional.","title":"username_claim","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.pprofextension.Config":{"additionalProperties":false,"description":"Config has the configuration for the extension enabling the golang net/http/pprof (Performance Profiler) extension.","markdownDescription":"# Performance Profiler\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [core], [contrib], [aws], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nPerformance Profiler extension enables the golang `net/http/pprof` endpoint.\nThis is typically used by developers to collect performance profiles and\ninvestigate issues with the service.\n\nThe following settings are required:\n\n- `endpoint` (default = localhost:1777): The endpoint in which the pprof will\nbe listening to. Use localhost:\u003cport\u003e to make it available only locally, or\n\":\u003cport\u003e\" to make it available on all network interfaces.\n- `block_profile_fraction` (default = 0): Fraction of blocking events that\nare profiled. A value \u003c= 0 disables profiling. See\nhttps://golang.org/pkg/runtime/#SetBlockProfileRate for details.\n- `mutex_profile_fraction` (default = 0): Fraction of mutex contention\nevents that are profiled. A value \u003c= 0 disables profiling. See\nhttps://golang.org/pkg/runtime/#SetMutexProfileFraction for details.\n\nThe following settings can be optionally configured:\n\n- `save_to_file`: File name to save the CPU profile to. The profiling starts when the\nCollector starts and is saved to the file when the Collector is terminated.\n\nExample:\n```yaml\n\nextensions:\n pprof:\n```\n\nThe full list of settings exposed for this exporter are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"block_profile_fraction":{"description":"Fraction of blocking events that are profiled. A value \u003c= 0 disables\nprofiling. 
See https://golang.org/pkg/runtime/#SetBlockProfileRate for details.","title":"block_profile_fraction","type":"integer"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"mutex_profile_fraction":{"description":"Fraction of mutex contention events that are profiled. A value \u003c= 0\ndisables profiling. See https://golang.org/pkg/runtime/#SetMutexProfileFraction\nfor details.","title":"mutex_profile_fraction","type":"integer"},"save_to_file":{"description":"Optional file name to save the CPU profile to. The profiling starts when the\nCollector starts and is saved to the file when the Collector is terminated.","title":"save_to_file","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.sigv4authextension.AssumeRole":{"additionalProperties":false,"description":"AssumeRole holds the configuration needed to assume a role","properties":{"arn":{"title":"arn","type":"string"},"session_name":{"title":"session_name","type":"string"},"sts_region":{"title":"sts_region","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.sigv4authextension.Config":{"additionalProperties":false,"description":"Config stores the configuration for the Sigv4 Authenticator","markdownDescription":"# Authenticator - Sigv4\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [aws], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis extension provides Sigv4 authentication for making requests to AWS services. For more information on the Sigv4 process, please look [here](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).\n\n## Configuration\n\nThe configuration fields are as follows:\n\n* `assume_role`: **Optional**. Specifies the configuration needed to assume a role\n * `arn`: The Amazon Resource Name (ARN) of a role to assume\n * `session_name`: **Optional**. The name of a role session\n * `sts_region`: The AWS region where STS is used to assumed the configured role\n * Note that if a role is intended to be assumed, and `sts_region` is not provided, then `sts_region` will default to the value for `region` if `region` is provided\n* `region`: **Optional**. The AWS region for the service you are exporting to for AWS Sigv4. This is differentiated from `sts_region` to handle cross region authentication\n * Note that an attempt will be made to obtain a valid region from the endpoint of the service you are exporting to\n * [List of AWS regions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)\n* `service`: **Optional**. 
The AWS service for AWS Sigv4\n * Note that an attempt will be made to obtain a valid service from the endpoint of the service you are exporting to\n\n\n```yaml\nextensions:\n sigv4auth:\n assume_role:\n arn: \"arn:aws:iam::123456789012:role/aws-service-role/access\"\n sts_region: \"us-east-1\"\n\nreceivers:\n hostmetrics:\n scrapers:\n memory:\n\nexporters:\n prometheusremotewrite:\n endpoint: \"https://aps-workspaces.us-west-2.amazonaws.com/workspaces/ws-XXX/api/v1/remote_write\"\n auth:\n authenticator: sigv4auth\n\nservice:\n extensions: [sigv4auth]\n pipelines:\n metrics:\n receivers: [hostmetrics]\n processors: []\n exporters: [prometheusremotewrite]\n```\n\n## Notes\n\n* The collector must have valid AWS credentials as used by the [AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials)","properties":{"assume_role":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.sigv4authextension.AssumeRole","title":"assume_role"},"region":{"title":"region","type":"string"},"service":{"title":"service","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.storage.dbstorage.Config":{"additionalProperties":false,"description":"Config defines configuration for dbstorage extension.","markdownDescription":"# Database Storage\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha] |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\u003e :construction: This extension is in alpha. Configuration and functionality are subject to change.\n\nThe Database Storage extension can persist state to a relational database. \n\nThe extension requires read and write access to a database table.\n\n`driver`: the name of the database driver to use. 
By default, the storage client supports \"sqlite3\" and \"pgx\".\n\nImplementors can add additional driver support by importing SQL drivers into the program.\nSee [Golang database/sql package documentation](https://pkg.go.dev/database/sql) for more information.\n\n`datasource`: the url of the database, in the format accepted by the driver.\n\n\n```\nextensions:\n db_storage:\n driver: \"sqlite3\"\n datasource: \"foo.db?_busy_timeout=10000\u0026_journal=WAL\u0026_sync=NORMAL\"\n\nservice:\n extensions: [db_storage]\n pipelines:\n traces:\n receivers: [nop]\n processors: [nop]\n exporters: [nop]\n\n# Data pipeline is required to load the config.\nreceivers:\n nop:\nprocessors:\n nop:\nexporters:\n nop:\n```","properties":{"datasource":{"title":"datasource","type":"string"},"driver":{"title":"driver","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.storage.filestorage.CompactionConfig":{"additionalProperties":false,"description":"CompactionConfig defines configuration for optional file storage compaction.","properties":{"check_interval":{"description":"CheckInterval specifies frequency of compaction check","title":"check_interval","type":"string"},"directory":{"description":"Directory specifies where the temporary files for compaction will be stored","title":"directory","type":"string"},"max_transaction_size":{"description":"MaxTransactionSize specifies the maximum number of items that might be present in single compaction iteration","title":"max_transaction_size","type":"integer"},"on_rebound":{"description":"OnRebound specifies that compaction is attempted online, when rebound conditions are met.\nThis typically happens when storage usage has increased, which caused increase in space allocation\nand afterwards it had most items removed. We want to run the compaction online only when there are\nnot too many elements still being stored (which is an indication that \"heavy usage\" period is over)\nso compaction should be relatively fast and at the same time there is relatively large volume of space\nthat might be reclaimed.","title":"on_rebound","type":"boolean"},"on_start":{"description":"OnStart specifies that compaction is attempted each time on start","title":"on_start","type":"boolean"},"rebound_needed_threshold_mib":{"description":"ReboundNeededThresholdMiB specifies the minimum total allocated size (both used and empty)\nto mark the need for online compaction","title":"rebound_needed_threshold_mib","type":"integer"},"rebound_trigger_threshold_mib":{"description":"ReboundTriggerThresholdMiB is used when compaction is marked as needed. 
When allocated data size drops\nbelow the specified value, the compactions starts and the flag marking need for compaction is cleared","title":"rebound_trigger_threshold_mib","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.extension.storage.filestorage.Config":{"additionalProperties":false,"description":"Config defines configuration for file storage extension.","markdownDescription":"# File Storage\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta] |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe File Storage extension can persist state to the local file system.\n\nThe extension requires read and write access to a directory. A default directory can be used, but it must already exist in order for the extension to operate.\n\n`directory` is the relative or absolute path to the dedicated data storage directory. \nThe default directory is `%ProgramData%\\Otelcol\\FileStorage` on Windows and `/var/lib/otelcol/file_storage` otherwise.\n\n`timeout` is the maximum time to wait for a file lock. This value does not need to be modified in most circumstances.\nThe default timeout is `1s`.\n\n## Compaction\n`compaction` defines how and when files should be compacted. There are two modes of compaction available (both of which can be set concurrently):\n- `compaction.on_start` (default: false), which happens when collector starts\n- `compaction.on_rebound` (default: false), which happens online when certain criteria are met; it's discussed in more detail below\n\n`compaction.directory` specifies the directory used for compaction (as a midstep).\n\n`compaction.max_transaction_size` (default: 65536): defines maximum size of the compaction transaction.\nA value of zero will ignore transaction sizes.\n\n### Rebound (online) compaction\n\nFor rebound compaction, there are two additional parameters available:\n- `compaction.rebound_needed_threshold_mib` (default: 100) - when allocated data exceeds this amount, the \"compaction needed\" flag will be enabled\n- `compaction.rebound_trigger_threshold_mib` (default: 10) - if the \"compaction needed\" flag is set and allocated data drops below this amount, compaction will begin and the \"compaction needed\" flag will be cleared\n- `compaction.check_interval` (default: 5s) - specifies how frequently the conditions for compaction are being checked\n\nThe idea behind rebound compaction is that in certain workloads (e.g. [persistent queue](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/exporterhelper#persistent-queue)) the storage might grow significantly (e.g. when the exporter is unable to send the data due to network problem) after which it is being emptied as the underlying issue is gone (e.g. network connectivity is back). This leaves a significant space that needs to be reclaimed (also, this space is reported in memory usage as mmap() is used underneath). 
The optimal conditions for this to happen online is after the storage is largely drained, which is being controlled by `rebound_trigger_threshold_mib`. To make sure this is not too sensitive, there's also `rebound_needed_threshold_mib` which specifies the total claimed space size that must be met for online compaction to even be considered. Consider following diagram for an example of meeting the rebound (online) compaction conditions.\n\n```\n ▲\n │\n │ XX.............\nm │ XXXX............\ne ├───────────XXXXXXX..........──────────── rebound_needed_threshold_mib\nm │ XXXXXXXXX..........\no │ XXXXXXXXXXX.........\nr │ XXXXXXXXXXXXXXXXX....\ny ├─────XXXXXXXXXXXXXXXXXXXXX..──────────── rebound_trigger_threshold_mib\n │ XXXXXXXXXXXXXXXXXXXXXXXXXX.........\n │ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n └──────────────── time ─────────────────►\n │ | |\n issue draining compaction happens\n starts begins and reclaims space\n\n X - actually used space\n . - claimed but no longer used space\n```\n\n\n## Example\n\n```\nextensions:\n file_storage:\n file_storage/all_settings:\n directory: /var/lib/otelcol/mydir\n timeout: 1s\n compaction:\n on_start: true\n directory: /tmp/\n max_transaction_size: 65_536\n\nservice:\n extensions: [file_storage, file_storage/all_settings]\n pipelines:\n traces:\n receivers: [nop]\n processors: [nop]\n exporters: [nop]\n\n# Data pipeline is required to load the config.\nreceivers:\n nop:\nprocessors:\n nop:\nexporters:\n nop:\n```","properties":{"compaction":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.storage.filestorage.CompactionConfig","title":"compaction"},"directory":{"title":"directory","type":"string"},"timeout":{"title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.aws.proxy.Config":{"additionalProperties":false,"description":"Config is the configuration for the local TCP proxy server.","properties":{"aws_endpoint":{"description":"AWSEndpoint is the X-Ray service endpoint which the local\nTCP server forwards requests to.","title":"aws_endpoint","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"local_mode":{"description":"LocalMode determines whether the EC2 instance metadata endpoint\nwill be called or not. 
Set to `true` to skip EC2 instance\nmetadata check.","title":"local_mode","type":"boolean"},"proxy_address":{"description":"ProxyAddress defines the proxy address that the local TCP server\nforwards HTTP requests to AWS X-Ray backend through.","title":"proxy_address","type":"string"},"region":{"description":"Region is the AWS region the local TCP server forwards requests to.","title":"region","type":"string"},"role_arn":{"description":"RoleARN is the IAM role used by the local TCP server when\ncommunicating with the AWS X-Ray service.","title":"role_arn","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration when forwarding\ncalls to the AWS X-Ray backend.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.aws.xray.telemetry.Config":{"additionalProperties":false,"properties":{"contributors":{"description":"Contributors can be used to explicitly define which X-Ray components are contributing to the telemetry.\nIf omitted, only X-Ray components with the same component.ID as the setup component will have access.","items":{"type":"string"},"title":"contributors","type":"array"},"enabled":{"description":"Enabled determines whether any telemetry should be recorded.","title":"enabled","type":"boolean"},"hostname":{"description":"Hostname can be used to explicitly define the hostname associated with the telemetry.","title":"hostname","type":"string"},"include_metadata":{"description":"IncludeMetadata determines whether metadata (instance ID, hostname, resourceARN)\nshould be included in the telemetry.","title":"include_metadata","type":"boolean"},"instance_id":{"description":"InstanceID can be used to explicitly define the instance ID associated with the telemetry.","title":"instance_id","type":"string"},"resource_arn":{"description":"ResourceARN can be used to explicitly define the resource ARN associated with the telemetry.","title":"resource_arn","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.attraction.ActionKeyValue":{"additionalProperties":false,"description":"ActionKeyValue specifies the attribute key to act upon.","properties":{"action":{"description":"Action specifies the type of action to perform.\nThe set of values are {INSERT, UPDATE, UPSERT, DELETE, HASH}.\nBoth lower case and upper case are supported.\nINSERT - Inserts the key/value to attributes when the key does not exist.\n No action is applied to attributes where the key already exists.\n Either Value, FromAttribute or FromContext must be set.\nUPDATE - Updates an existing key with a value. No action is applied\n to attributes where the key does not exist.\n Either Value, FromAttribute or FromContext must be set.\nUPSERT - Performs insert or update action depending on the attributes\n containing the key. The key/value is inserted to attributes\n that did not originally have the key. The key/value is updated\n for attributes where the key already existed.\n Either Value, FromAttribute or FromContext must be set.\nDELETE - Deletes the attribute. If the key doesn't exist,\n no action is performed.\nHASH - Calculates the SHA-1 hash of an existing value and overwrites the\n value with its SHA-1 hash result. 
If the feature gate\n `coreinternal.attraction.hash.sha256` is enabled, it uses SHA2-256\n instead.\nEXTRACT - Extracts values using a regular expression rule from the input\n 'key' to target keys specified in the 'rule'. If a target key\n already exists, it will be overridden.\nCONVERT - converts the type of an existing attribute, if convertible.\nThis is a required field.","title":"action","type":"string"},"converted_type":{"description":"ConvertedType specifies the target type of an attribute to be converted.\nIf the key doesn't exist, no action is performed.\nIf the value cannot be converted, the original value will be left as-is.","title":"converted_type","type":"string"},"from_attribute":{"description":"FromAttribute specifies the attribute to use to populate\nthe value. If the attribute doesn't exist, no action is performed.","title":"from_attribute","type":"string"},"from_context":{"description":"FromContext specifies the context value to use to populate\nthe value. The values would be searched in client.Info.Metadata.\nIf the key doesn't exist, no action is performed.\nIf the key has multiple values the values will be joined with `;` separator.","title":"from_context","type":"string"},"key":{"description":"Key specifies the attribute to act upon.\nThis is a required field.","title":"key","type":"string"},"pattern":{"description":"A regex pattern must be specified for the action EXTRACT.\nIt uses the attribute specified by `key` to extract values from.\nThe target keys are inferred from the names of the matcher groups\nprovided, and the target values are taken from the values matched by\nthose groups.\nNote: All subexpressions must have a name.\nNote: The value type of the source key must be a string. If it isn't,\nno extraction will occur.","title":"pattern","type":"string"},"value":{"description":"Value specifies the value to populate for the key.\nThe type of the value is inferred from the configuration.","title":"value"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.consumerretry.Config":{"additionalProperties":false,"description":"Config defines configuration for retrying batches in case of receiving a retryable error from a downstream consumer.","properties":{"enabled":{"description":"Enabled indicates whether to not retry sending logs in case of receiving a retryable error from a downstream\nconsumer. Default is false.","title":"enabled","type":"boolean"},"initial_interval":{"description":"InitialInterval is the time to wait after the first failure before retrying. Default value is 1 second.","title":"initial_interval","type":"string"},"max_elapsed_time":{"description":"MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a logs batch to\na downstream consumer. Once this value is reached, the data is discarded. It never stops if MaxElapsedTime == 0.\nDefault value is 5 minutes.","title":"max_elapsed_time","type":"string"},"max_interval":{"description":"MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between\nconsecutive retries will always be `MaxInterval`. 
Default value is 30 seconds.","title":"max_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.Attribute":{"additionalProperties":false,"description":"Attribute specifies the attribute key and optional value to match against.","properties":{"key":{"description":"Key specifies the attribute key.","title":"key","type":"string"},"value":{"description":"Values specifies the value to match against.\nIf it is not set, any value will match.","title":"value"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.InstrumentationLibrary":{"additionalProperties":false,"description":"InstrumentationLibrary specifies the instrumentation library and optional version to match against.","properties":{"name":{"title":"name","type":"string"},"version":{"description":"version match\n expected actual match\n nil \u003cblank\u003e yes\n nil 1 yes\n \u003cblank\u003e \u003cblank\u003e yes\n \u003cblank\u003e 1 no\n 1 \u003cblank\u003e no\n 1 1 yes","title":"version","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.LogSeverityNumberMatchProperties":{"additionalProperties":false,"description":"LogSeverityNumberMatchProperties defines how to match based on a log record's SeverityNumber field.","properties":{"match_undefined":{"description":"MatchUndefined controls whether logs with \"undefined\" severity matches.\nIf this is true, entries with undefined severity will match.","title":"match_undefined","type":"boolean"},"min":{"description":"Min is the lowest severity that may be matched.\ne.g. if this is plog.SeverityNumberInfo, INFO, WARN, ERROR, and FATAL logs will match.","title":"min","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchConfig":{"additionalProperties":false,"description":"MatchConfig has two optional MatchProperties one to define what is processed by the processor, captured under the 'include' and the second, exclude, to define what is excluded from the processor.","properties":{"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties","description":"Exclude specifies when this processor will not be applied to the input data\nwhich match the specified properties.\nNote: The `exclude` properties are checked after the `include` properties,\nif they exist, are checked.\nIf `include` isn't specified, the `exclude` properties are checked against\nall input data.\nThis is an optional field. If neither `include` and `exclude` are set, all input data\nis processed. If `exclude` is set and `include` isn't set, then all the\ninput data that does not match the properties in this structure are processed.","title":"exclude"},"include":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties","description":"Include specifies the set of input data properties that must be present in order\nfor this processor to apply to it.\nNote: If `exclude` is specified, the input data is compared against those\nproperties after the `include` properties.\nThis is an optional field. If neither `include` and `exclude` are set, all input data\nare processed. 
If `include` is set and `exclude` isn't set, then all\ninput data matching the properties in this structure are processed.","title":"include"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties":{"additionalProperties":false,"description":"MatchProperties specifies the set of properties in a spans/log/metric to match against and if the input data should be included or excluded from the processor.","properties":{"attributes":{"description":"Attributes specifies the list of attributes to match against.\nAll of these attributes must match exactly for a match to occur.\nOnly match_type=strict is allowed if \"attributes\" are specified.\nThis is an optional field.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.Attribute"},"title":"attributes","type":"array"},"libraries":{"description":"Libraries specify the list of items to match the implementation library against.\nA match occurs if the span's implementation library matches at least one item in this list.\nThis is an optional field.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.InstrumentationLibrary"},"title":"libraries","type":"array"},"log_bodies":{"description":"LogBodies is a list of strings that the LogRecord's body field must match\nagainst.","items":{"type":"string"},"title":"log_bodies","type":"array"},"log_severity_number":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.LogSeverityNumberMatchProperties","description":"LogSeverityNumber defines how to match against a log record's SeverityNumber, if defined.","title":"log_severity_number"},"log_severity_texts":{"description":"LogSeverityTexts is a list of strings that the LogRecord's severity text field must match\nagainst.","items":{"type":"string"},"title":"log_severity_texts","type":"array"},"match_type":{"title":"match_type","type":"string"},"metric_names":{"description":"MetricNames is a list of strings to match metric name against.\nA match occurs if metric name matches at least one item in the list.\nThis field is optional.","items":{"type":"string"},"title":"metric_names","type":"array"},"regexp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterset.regexp.Config","title":"regexp"},"resources":{"description":"Resources specify the list of items to match the resources against.\nA match occurs if the data's resources match at least one item in this list.\nThis is an optional field.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.Attribute"},"title":"resources","type":"array"},"services":{"description":"Services specify the list of items to match service name against.\nA match occurs if the span's service name matches at least one item in this list.\nThis is an optional field.","items":{"type":"string"},"title":"services","type":"array"},"span_kinds":{"description":"SpanKinds specify the list of items to match the span kind against.\nA match occurs if the span's span kind matches at least one item in this list.\nThis is an optional field","items":{"type":"string"},"title":"span_kinds","type":"array"},"span_names":{"description":"SpanNames specify the list of items to match span name against.\nA match occurs if the span name matches at least one item in this list.\nThis is an optional 
field.","items":{"type":"string"},"title":"span_names","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MetricMatchProperties":{"additionalProperties":false,"description":"MetricMatchProperties specifies the set of properties in a metric to match against and the type of string pattern matching to use.","properties":{"expressions":{"description":"Expressions specifies the list of expr expressions to match metrics against.\nA match occurs if any datapoint in a metric matches at least one expression in this list.","items":{"type":"string"},"title":"expressions","type":"array"},"match_type":{"description":"MatchType specifies the type of matching desired","title":"match_type","type":"string"},"metric_names":{"description":"MetricNames specifies the list of string patterns to match metric names against.\nA match occurs if the metric name matches at least one string pattern in this list.","items":{"type":"string"},"title":"metric_names","type":"array"},"regexp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterset.regexp.Config","description":"RegexpConfig specifies options for the MetricRegexp match type","title":"regexp"},"resource_attributes":{"description":"ResourceAttributes defines a list of possible resource attributes to match metrics against.\nA match occurs if any resource attribute matches all expressions in this given list.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.Attribute"},"title":"resource_attributes","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterset.regexp.Config":{"additionalProperties":false,"description":"Config represents the options for a NewFilterSet.","properties":{"cacheenabled":{"description":"CacheEnabled determines whether match results are LRU cached to make subsequent matches faster.\nCache size is unlimited unless CacheMaxNumEntries is also specified.","title":"cacheenabled","type":"boolean"},"cachemaxnumentries":{"description":"CacheMaxNumEntries is the max number of entries of the LRU cache that stores match results.\nCacheMaxNumEntries is ignored if CacheEnabled is false.","title":"cachemaxnumentries","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.k8sconfig.APIConfig":{"additionalProperties":false,"description":"APIConfig contains options relevant to connecting to the K8s API","properties":{"auth_type":{"description":"How to authenticate to the K8s API server. 
This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.internal.splunk.HecToOtelAttrs":{"additionalProperties":false,"description":"HecToOtelAttrs defines the mapping of Splunk HEC metadata to attributes","properties":{"host":{"description":"Host indicates the mapping of the host field to a specific unified model attribute.","title":"host","type":"string"},"index":{"description":"Index indicates the mapping of the index field to a specific unified model attribute.","title":"index","type":"string"},"source":{"description":"Source indicates the mapping of the source field to a specific unified model attribute.","title":"source","type":"string"},"sourcetype":{"description":"SourceType indicates the mapping of the sourcetype field to a specific unified model attribute.","title":"sourcetype","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.pkg.resourcetotelemetry.Settings":{"additionalProperties":false,"description":"Settings defines configuration for converting resource attributes to telemetry attributes.","markdownDescription":"# Resource to Telemetry\n\nThis is an exporter helper for converting resource attributes to telemetry attributes.\nThis helper can be used to wrap other exporters.\n\n\u003e :warning: This exporter helper should not be added to a service pipeline.\n\n## Configuration\n\nThe following configuration options can be modified:\n\n- `resource_to_telemetry_conversion`\n - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default.","properties":{"enabled":{"description":"Enabled indicates whether to convert resource attributes to telemetry attributes. 
Default is `false`.","title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.HeaderConfig":{"additionalProperties":false,"properties":{"metadata_operators":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.Config"},"title":"metadata_operators","type":"array"},"pattern":{"title":"pattern","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.OrderingCriteria":{"additionalProperties":false,"properties":{"regex":{"title":"regex","type":"string"},"sort_by":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.SortRuleImpl"},"title":"sort_by","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.SortRuleImpl":{"additionalProperties":false,"properties":{},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.Config":{"additionalProperties":false,"description":"Config is the configuration of an operator","properties":{"Builder":{"title":"Builder"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.helper.MultilineConfig":{"additionalProperties":false,"description":"MultilineConfig is the configuration of a multiline helper","properties":{"line_end_pattern":{"title":"line_end_pattern","type":"string"},"line_start_pattern":{"title":"line_start_pattern","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.attributesprocessor.Config":{"additionalProperties":false,"description":"Config specifies the set of attributes to be inserted, updated, upserted and deleted and the properties to include/exclude a span from being processed.","markdownDescription":"# Attributes Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [core], [contrib], [aws], [observiq], [splunk], [sumo] |\n| Warnings | [Identity Conflict](#warnings) |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe attributes processor modifies attributes of a span, log, or metric. Please refer to\n[config.go](./config.go) for the config spec.\n\nThis processor also supports the ability to filter and match input data to determine\nif they should be [included or excluded](#includeexclude-filtering) for specified actions.\n\nIt takes a list of actions which are performed in order specified in the config.\nThe supported actions are:\n- `insert`: Inserts a new attribute in input data where the key does not already exist.\n- `update`: Updates an attribute in input data where the key does exist.\n- `upsert`: Performs insert or update. 
Inserts a new attribute in input data where the\n key does not already exist and updates an attribute in input data where the key\n does exist.\n- `delete`: Deletes an attribute from the input data.\n- `hash`: Hashes (SHA1) an existing attribute value.\n- `extract`: Extracts values using a regular expression rule from the input key\n to target keys specified in the rule. If a target key already exists, it will\n be overridden. Note: It behaves similar to the Span Processor `to_attributes`\n setting with the existing attribute as the source.\n- `convert`: Converts an existing attribute to a specified type.\n\nFor the actions `insert`, `update` and `upsert`,\n - `key` is required\n - one of `value`, `from_attribute` or `from_context` is required\n - `action` is required.\n```yaml\n # Key specifies the attribute to act upon.\n- key: \u003ckey\u003e\n action: {insert, update, upsert}\n # Value specifies the value to populate for the key.\n # The type is inferred from the configuration.\n value: \u003cvalue\u003e\n\n # Key specifies the attribute to act upon.\n- key: \u003ckey\u003e\n action: {insert, update, upsert}\n # FromAttribute specifies the attribute from the input data to use to populate\n # the value. If the attribute doesn't exist, no action is performed.\n from_attribute: \u003cother key\u003e\n\n # Key specifies the attribute to act upon.\n- key: \u003ckey\u003e\n action: {insert, update, upsert}\n # FromContext specifies the context value to use to populate the attribute value. \n # If the key is prefixed with `metadata.`, the values are searched\n # in the receiver's transport protocol additional information like gRPC Metadata or HTTP Headers. \n # If the key is prefixed with `auth.`, the values are searched\n # in the authentication information set by the server authenticator. 
\n # Refer to the documentation of the server authenticator that is part of your pipeline for more information about which attributes are available.\n # If the key doesn't exist, no action is performed.\n # If the key has multiple values the values will be joined with `;` separator.\n from_context: \u003cother key\u003e\n```\n\nFor the `delete` action,\n - `key` and/or `pattern` is required\n - `action: delete` is required.\n```yaml\n# Key specifies the attribute to act upon.\n- key: \u003ckey\u003e\n action: delete\n # Rule specifies the regex pattern for attribute names to act upon.\n pattern: \u003cregular pattern\u003e\n```\n\n\nFor the `hash` action,\n - `key` and/or `pattern` is required\n - `action: hash` is required.\n```yaml\n# Key specifies the attribute to act upon.\n- key: \u003ckey\u003e\n action: hash\n # Rule specifies the regex pattern for attribute names to act upon.\n pattern: \u003cregular pattern\u003e\n```\n\n\nFor the `extract` action,\n - `key` is required\n - `pattern` is required.\n ```yaml\n # Key specifies the attribute to extract values from.\n # The value of `key` is NOT altered.\n- key: \u003ckey\u003e\n # Rule specifies the regex pattern used to extract attributes from the value\n # of `key`.\n # The submatchers must be named.\n # If attributes already exist, they will be overwritten.\n pattern: \u003cregular pattern with named matchers\u003e\n action: extract\n\n ```\n\n\nFor the `convert` action,\n - `key` is required\n - `action: convert` is required.\n - `converted_type` is required and must be one of int, double or string\n```yaml\n# Key specifies the attribute to act upon.\n- key: \u003ckey\u003e\n action: convert\n converted_type: \u003cint|double|string\u003e\n```\n\nThe list of actions can be composed to create rich scenarios, such as\nback-filling attributes, copying values to a new key, or redacting sensitive information.\nThe following is a sample configuration.\n\n```yaml\nprocessors:\n attributes/example:\n actions:\n - key: db.table\n action: delete\n - key: redacted_span\n value: true\n action: upsert\n - key: copy_key\n from_attribute: key_original\n action: update\n - key: account_id\n value: 2245\n action: insert\n - key: account_password\n action: delete\n - key: account_email\n action: hash\n - key: http.status_code\n action: convert\n converted_type: int\n\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed\nexamples on using the processor.\n\n### Attributes Processor for Metrics vs. [Metric Transform Processor](../metricstransformprocessor)\n\nRegarding metric support, these two processors have overlapping functionality. They can both do simple modifications\nof metric attribute key-value pairs. As a general rule, the attributes processor has more attribute-related\nfunctionality, while the metrics transform processor can do much more data manipulation. The attributes processor\nis preferred when the only needed functionality is overlapping, as it natively uses the official OpenTelemetry\ndata model. 
However, if the metric transform processor is already in use or its extra functionality is necessary,\nthere's no need to migrate away from it.\n\nShared functionality\n* Add attributes\n* Update values of attributes\n\nAttribute processor specific functionality\n* delete\n* hash\n* extract\n\nMetric transform processor specific functionality\n* Rename metrics\n* Delete data points\n* Toggle data type\n* Scale value\n* Aggregate across label sets\n* Aggregate across label values\n\n## Include/Exclude Filtering\n\nThe [attribute processor](README.md) exposes an option to provide a set of properties of a span, log \nor metric record to match against to determine if the input data should be included or excluded from\nthe processor. To configure this option, under `include` and/or `exclude` at least `match_type` and \none of the following is required:\n- For spans, one of `services`, `span_names`, `span_kinds`, `attributes`, `resources` or `libraries` \nmust be specified with a non-empty value for a valid configuration. The `log_bodies`, `log_severity_texts`, \n`log_severity_number` and `metric_names` fields are invalid.\n- For logs, one of `log_bodies`, `log_severity_texts`, `log_severity_number`, `attributes`, `resources`\nor `libraries` must be specified with a non-empty value for a valid configuration. The `span_names`, \n`span_kinds`, `metric_names` and `services` fields are invalid.\n- For metrics, one of `metric_names` or `resources` must be specified with a valid non-empty value for\na valid configuration. The `span_names`, `span_kinds`, `log_bodies`, `log_severity_texts`, \n`log_severity_number`, `services`, `attributes` and `libraries` fields are invalid.\n\n\nNote: If both `include` and `exclude` are specified, the `include` properties\nare checked before the `exclude` properties.\n\n```yaml\nattributes:\n # include and/or exclude can be specified. However, the include properties\n # are always checked before the exclude properties.\n {include, exclude}:\n # At least one of services, span_names or attributes must be specified.\n # It is supported to have more than one specified, but all of the specified\n # conditions must evaluate to true for a match to occur.\n\n # match_type controls how items in \"services\" and \"span_names\" arrays are\n # interpreted. 
Possible values are \"regexp\" or \"strict\".\n # This is a required field.\n match_type: {strict, regexp}\n\n # regexp is an optional configuration section for match_type regexp.\n regexp:\n # \u003c see \"Match Configuration\" below \u003e\n\n # services specify an array of items to match the service name against.\n # A match occurs if the span service name matches at least one of the items.\n # This is an optional field.\n services: [\u003citem1\u003e, ..., \u003citemN\u003e]\n\n # resources specifies a list of resources to match against.\n # A match occurs if the input data resources matches at least one of the items.\n # This is an optional field.\n resources:\n # Key specifies the resource to match against.\n - key: \u003ckey\u003e\n # Value specifies the exact value to match against.\n # If not specified, a match occurs if the key is present in the resources.\n value: {value}\n\n # libraries specify a list of items to match the implementation library against.\n # A match occurs if the input data implementation library matches at least one of the items.\n # This is an optional field.\n libraries: [\u003citem1\u003e, ..., \u003citemN\u003e]\n # Name specifies the library to match against.\n - name: \u003cname\u003e\n # Version specifies the exact version to match against.\n # This is an optional field.\n # If the field is not set, any version will match.\n # If the field is set to an empty string, only an\n # empty string version will match.\n version: {version}\n\n # The span name must match at least one of the items.\n # This is an optional field.\n span_names: [\u003citem1\u003e, ..., \u003citemN\u003e]\n\n # The span kind must match at least one of the items.\n # This is an optional field.\n span_kinds: [\u003citem1\u003e, ..., \u003citemN\u003e]\n\n # The log body must match at least one of the items.\n # Currently only string body types are supported.\n # This is an optional field.\n log_bodies: [\u003citem1\u003e, ..., \u003citemN\u003e]\n\n # The log severity text must match at least one of the items.\n # This is an optional field.\n log_severity_texts: [\u003citem1\u003e, ..., \u003citemN\u003e]\n\n # The log severity number defines how to match against a log record's\n # SeverityNumber, if defined.\n # This is an optional field.\n log_severity_number:\n # Min is the lowest severity that may be matched.\n # e.g. if this is plog.SeverityNumberInfo, \n # INFO, WARN, ERROR, and FATAL logs will match.\n min: \u003cint\u003e\n # MatchUndefined controls whether logs with \"undefined\" severity matches.\n # If this is true, entries with undefined severity will match.\n match_undefined: \u003cbool\u003e\n\n # The metric name must match at least one of the items.\n # This is an optional field.\n metric_names: [\u003citem1\u003e, ..., \u003citemN\u003e]\n\n # Attributes specifies the list of attributes to match against.\n # All of these attributes must match exactly for a match to occur.\n # This is an optional field.\n attributes:\n # Key specifies the attribute to match against.\n - key: \u003ckey\u003e\n # Value specifies the exact value to match against.\n # If not specified, a match occurs if the key is present in the attributes.\n value: {value}\n```\n\n### Match Configuration\n\nSome `match_type` values have additional configuration options that can be\nspecified. 
The `match_type` value is the name of the configuration section.\nThese sections are optional.\n\n```yaml\n# regexp is an optional configuration section for match_type regexp.\nregexp:\n # cacheenabled determines whether match results are LRU cached to make subsequent matches faster.\n # Cache size is unlimited unless cachemaxnumentries is also specified.\n cacheenabled: \u003cbool\u003e\n # cachemaxnumentries is the max number of entries of the LRU cache; ignored if cacheenabled is false.\n cachemaxnumentries: \u003cint\u003e\n```\n\n## Warnings\n\nIn general, the Attributes processor is a very safe processor to use. Care only needs to be taken when modifying data point attributes:\n- [Identity Conflict](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#identity-conflict): Reducing/changing existing data point attributes has the potential to create an identity conflict since the Attributes processor does not perform any re-aggregation of the data points. Adding new attributes to data points is safe.","properties":{"actions":{"description":"Actions specifies the list of attributes to act on.\nThe set of actions are {INSERT, UPDATE, UPSERT, DELETE, HASH, EXTRACT, CONVERT}.\nThis is a required field.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.attraction.ActionKeyValue"},"title":"actions","type":"array"},"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties","description":"Exclude specifies when this processor will not be applied to the input data\nwhich match the specified properties.\nNote: The `exclude` properties are checked after the `include` properties,\nif they exist, are checked.\nIf `include` isn't specified, the `exclude` properties are checked against\nall input data.\nThis is an optional field. If neither `include` and `exclude` are set, all input data\nis processed. If `exclude` is set and `include` isn't set, then all the\ninput data that does not match the properties in this structure are processed.","title":"exclude"},"include":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties","description":"Include specifies the set of input data properties that must be present in order\nfor this processor to apply to it.\nNote: If `exclude` is specified, the input data is compared against those\nproperties after the `include` properties.\nThis is an optional field. If neither `include` and `exclude` are set, all input data\nare processed. 
If `include` is set and `exclude` isn't set, then all\ninput data matching the properties in this structure are processed.","title":"include"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.cumulativetodeltaprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration for the processor.","markdownDescription":"# Cumulative to Delta Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [aws], [observiq], [sumo] |\n| Warnings | [Statefulness](#warnings) |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Description\n\nThe cumulative to delta processor (`cumulativetodeltaprocessor`) converts monotonic, cumulative sum and histogram metrics to monotonic, delta metrics. Non-monotonic sums and exponential histograms are excluded.\n\n## Configuration\n\nConfiguration is specified through a list of metrics. The processor uses metric names to identify a set of cumulative metrics and converts them from cumulative to delta.\n\nThe following settings can be optionally configured:\n\n- `include`: List of metric names or patterns to convert to delta.\n- `exclude`: List of metric names or patterns to not convert to delta. **If a metric name matches both include and exclude, exclude takes precedence.**\n- `max_staleness`: The total time a state entry will live past the time it was last seen. Set to 0 to retain state indefinitely. Default: 0\n- `initial_value`: Handling of the first observed point for a given metric identity.\n When the collector (re)starts, there's no record of how much of a given cumulative counter has already been converted to delta values.\n - `auto` (default): Send if and only if the starttime is set AND the starttime happens after the component started AND the starttime is different from the timestamp.\n Suitable for gateway deployments, this heuristic is like `drop`, but keeps values for newly started counters (which could not have had previous observed values).\n - `keep`: Send the observed value as the delta value.\n Suitable for when the incoming metrics have not been observed before,\n e.g. 
running the collector as a sidecar, the collector lifecycle is tied to the metric source.\n - `drop`: Keep the observed value but don't send.\n Suitable for gateway deployments, guarantees that all delta counts it produces haven't been observed before, but loses the values between their first 2 observations.\n\nIf neither include nor exclude are supplied, no filtering is applied.\n\n#### Examples\n\n```yaml\nprocessors:\n # processor name: cumulativetodelta\n cumulativetodelta:\n\n # list the exact cumulative sum or histogram metrics to convert to delta\n include:\n metrics:\n - \u003cmetric_1_name\u003e\n - \u003cmetric_2_name\u003e\n .\n .\n - \u003cmetric_n_name\u003e\n match_type: strict\n```\n\n```yaml\nprocessors:\n # processor name: cumulativetodelta\n cumulativetodelta:\n\n # Convert cumulative sum or histogram metrics to delta\n # if and only if 'metric' is in the name\n include:\n metrics:\n - \"*metric*\"\n match_type: regexp\n```\n\n```yaml\nprocessors:\n # processor name: cumulativetodelta\n cumulativetodelta:\n\n # Convert cumulative sum or histogram metrics to delta\n # if and only if 'metric' is not in the name\n exclude:\n metrics:\n - \"*metric*\"\n match_type: regexp\n```\n\n```yaml\nprocessors:\n # processor name: cumulativetodelta\n cumulativetodelta:\n # If include/exclude are not specified\n # convert all cumulative sum or histogram metrics to delta\n```\n\n## Warnings\n\n- [Statefulness](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#statefulness): The cumulativetodelta processor calculates delta by remembering the previous value of a metric. For this reason, the calculation is only accurate if the metric is continuously sent to the same instance of the collector. As a result, the cumulativetodelta processor may not work as expected if used in a deployment of multiple collectors. When using this processor it is best for the data source to be sending data to a single collector.\n\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib","properties":{"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.cumulativetodeltaprocessor.MatchMetrics","title":"exclude"},"include":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.cumulativetodeltaprocessor.MatchMetrics","description":"Include specifies a filter on the metrics that should be converted.\nExclude specifies a filter on the metrics that should not be converted.\nIf neither `include` nor `exclude` are set, all metrics will be converted.\nCannot be used with deprecated Metrics config option.","title":"include"},"initial_value":{"description":"InitialValue determines how to handle the first datapoint for a given metric. Valid values:\n\n - auto: (default) send the first point iff the starttime is set AND the starttime happens after the component started AND the starttime is different from the timestamp\n - keep: always send the first point\n - drop: don't send the first point, but store it for subsequent delta calculations","title":"initial_value","type":"integer"},"max_staleness":{"description":"MaxStaleness is the total time a state entry will live past the time it was last seen. 
Set to 0 to retain state indefinitely.","title":"max_staleness","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.cumulativetodeltaprocessor.MatchMetrics":{"additionalProperties":false,"properties":{"match_type":{"title":"match_type","type":"string"},"metrics":{"items":{"type":"string"},"title":"metrics","type":"array"},"regexp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterset.regexp.Config","title":"regexp"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.datadogprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration options for datadogprocessor.","markdownDescription":"# Datadog Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\n## Description\n\nThe Datadog Processor can be used to compute Datadog APM Stats pre-sampling. For example, when using the [tailsamplingprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/tailsamplingprocessor#tail-sampling-processor) or [probabilisticsamplerprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/probabilisticsamplerprocessor) components, the `datadogprocessor` can be prepended into the pipeline to ensure that Datadog APM Stats are accurate and include the dropped traces.\n\n## Usage\n\nTo use the Datadog Processor, simply prepend it into a pipeline before any sampling processor. The Datadog Processor will compute APM Stats on all spans that it sees. 
Here is an example on how to add it to a pipeline using the [probabilisticsampler](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/probabilisticsamplerprocessor):\n\n\u003ctable\u003e\n\u003ctr\u003e\n\u003ctd\u003e Before \u003c/td\u003e \u003ctd\u003e After \u003c/td\u003e\n\u003c/tr\u003e\n\u003ctr\u003e\n\u003ctd valign=\"top\"\u003e\n\n```yaml\n# ...\nprocessors:\n # ...\n probabilistic_sampler:\n sampling_percentage: 20\n\nexporters:\n datadog:\n api:\n key: ${env:DD_API_KEY}\n\nservice:\n pipelines:\n metrics:\n receivers: [otlp]\n processors: [batch]\n exporters: [datadog]\n traces:\n receivers: [otlp]\n processors: [batch, probabilistic_sampler]\n exporters: [datadog]\n```\n\n\u003c/td\u003e\u003ctd valign=\"top\"\u003e\n\n```yaml\n# ...\nprocessors:\n # ...\n probabilistic_sampler:\n sampling_percentage: 20\n # add the \"datadog\" processor definition\n datadog:\n\nexporters:\n datadog:\n api:\n key: ${env:DD_API_KEY}\n\nservice:\n pipelines:\n metrics:\n receivers: [otlp]\n processors: [batch]\n exporters: [datadog]\n traces:\n receivers: [otlp]\n # prepend it to the sampler in your pipeline:\n processors: [batch, datadog, probabilistic_sampler]\n exporters: [datadog]\n```\n\n\u003c/tr\u003e\u003c/table\u003e\n\nSimply add the Datadog Processor into your list of processors and prepend it to the sampler in the traces pipeline to ensure it sees all spans.\n\n## Configuration\n\nBy default, when used in conjunction with the Datadog Exporter, the processor should detect its presence (as long as it is configured within a pipeline), and use it to export the Datadog APM Stats. No configuration needed!\n\nIf using within a gateway deployment or running alongside the Datadog Agent where the Datadog Exporter is not present, then you must specify an alternative exporter to use, such as for example an OTLP exporter:\n\n```yaml\nprocessors:\n datadog:\n metrics_exporter: otlp\n```\n\nThe default value for `metrics_exporter` is `datadog`. If your Datadog Exporter has a different name, you must specify it via config. 
Any configured metrics exporter must exist as part of a metrics pipeline.\n\nWhen using in conjunction with the Datadog Agent's OTLP Ingest, the minimum required Datadog Agent version that supports this processor is 7.42.0.\n\nIf not using the Datadog backend, the processor will still create valid RED metrics, but in that situation you may prefer to use the [spanmetricsprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/spanmetricsprocessor) instead.","properties":{"metrics_exporter":{"description":"MetricsExporter specifies the name of the metrics exporter to be used when\nexporting stats metrics.","title":"metrics_exporter","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.deltatorateprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration for the processor.","markdownDescription":"# Delta to Rate Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [aws], [observiq], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n**Status: under development; Not recommended for production usage.**\n\n## Description\n\nThe delta to rate processor (`deltatorateprocessor`) converts delta sum metrics to rate metrics. This rate is a gauge. \n\n## Configuration\n\nConfiguration is specified through a list of metrics. The processor uses metric names to identify a set of delta sum metrics and calculates the rates which are gauges.\n\n```yaml\nprocessors:\n # processor name: deltatorate\n deltatorate:\n\n # list the delta sum metrics to calculate the rate. 
This is a required field.\n metrics:\n - \u003cmetric_1_name\u003e\n - \u003cmetric_2_name\u003e\n .\n .\n - \u003cmetric_n_name\u003e\n```\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib","properties":{"metrics":{"description":"List of delta sum metrics to convert to rates","items":{"type":"string"},"title":"metrics","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for Resource processor.","markdownDescription":"# Filter Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, metrics, logs |\n| Distributions | [core], [contrib], [aws], [observiq], [splunk], [sumo] |\n| Warnings | [Orphaned Telemetry, Other](#warnings) |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe filterprocessor allows dropping spans, span events, metrics, datapoints, and logs from the collector.\n\n## Configuration\n\nThe filterprocessor utilizes the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md)\nto create conditions that determine when telemetry should be dropped.\nIf **any** condition is met, the telemetry is dropped (each condition is ORed together).\nEach configuration option corresponds with a different type of telemetry and OTTL Context.\nSee the table below for details on each context and the fields it exposes.\n\n| Config | OTTL Context |\n|---------------------|------------------------------------------------------------------------------------------------------------------------------------|\n| `traces.span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspan/README.md) |\n| `traces.spanevent` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspanevent/README.md) |\n| `metrics.metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlmetric/README.md) |\n| `metrics.datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottldatapoint/README.md) |\n| `logs.log_record` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) |\n\nThe OTTL allows the use of `and`, `or`, and `()` in conditions.\nSee [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md#boolean-expressions) for more details.\n\nFor conditions that apply to the same signal, such as spans and span events, if the \"higher\" level telemetry matches a condition 
and is dropped, the \"lower\" level condition will not be checked.\nThis means that if a span is dropped but a span event condition was defined, the span event condition will not be checked for that span.\nThe same relationship applies to metrics and datapoints.\n\nIf all span events for a span are dropped, the span will be left intact.\nIf all datapoints for a metric are dropped, the metric will also be dropped.\n\nThe filter processor also allows configuring an optional field, `error_mode`, which will determine how the processor reacts to errors that occur while processing an OTTL condition.\n\n| error_mode | description |\n|-----------------------|----------------------------------------------------------------------------------------------------------------------------|\n| ignore | The processor ignores errors returned by conditions and continues on to the next condition. This is the recommended mode. |\n| propagate | The processor returns the error up the pipeline. This will result in the payload being dropped from the collector. |\n\nIf not specified, `propagate` will be used.\n\n### Examples\n\n```yaml\nprocessors:\n filter/ottl:\n error_mode: ignore\n traces:\n span:\n - 'attributes[\"container.name\"] == \"app_container_1\"'\n - 'resource.attributes[\"host.name\"] == \"localhost\"'\n - 'name == \"app_3\"'\n spanevent:\n - 'attributes[\"grpc\"] == true'\n - 'IsMatch(name, \".*grpc.*\")'\n metrics:\n metric:\n - 'name == \"my.metric\" and resource.attributes[\"my_label\"] == \"abc123\"'\n - 'type == METRIC_DATA_TYPE_HISTOGRAM'\n datapoint:\n - 'metric.type == METRIC_DATA_TYPE_SUMMARY'\n - 'resource.attributes[\"service.name\"] == \"my_service_name\"'\n logs:\n log_record:\n - 'IsMatch(body, \".*password.*\")'\n - 'severity_number \u003c SEVERITY_NUMBER_WARN'\n```\n\n### OTTL Functions\n\nThe filter processor has access to all [OTTL Converter functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters)\n\nIn addition, the processor defines a few of its own functions:\n\n**Metrics only functions**\n- [HasAttrKeyOnDatapoint](#HasAttrKeyOnDatapoint)\n- [HasAttrOnDatapoint](#HasAttrOnDatapoint)\n\n#### HasAttrKeyOnDatapoint\n\n`HasAttrKeyOnDatapoint(key)`\n\nReturns `true` if the given key appears in the attribute map of any datapoint on a metric.\n`key` must be a string.\n\nExamples:\n\n- `HasAttrKeyOnDatapoint(\"http.method\")`\n\n#### HasAttrOnDatapoint\n\n`HasAttrOnDatapoint(key, value)`\n\nReturns `true` if the given key and value appears in the attribute map of any datapoint on a metric.\n`key` and `value` must both be strings.\n\nExamples:\n\n- `HasAttrOnDatapoint(\"http.method\", \"GET\")`\n\n## Alternative Config Options\n\nAll the following configurations can be expressed using OTTL configuration\nand may eventually be deprecated as part of [#18642](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/18642).\n\nThe filter processor can be configured to include or exclude:\n\n- Logs, based on resource attributes using the `strict` or `regexp` match types\n- Metrics based on metric name in the case of the `strict` or `regexp` match types,\n or based on other metric attributes in the case of the `expr` match type.\n Please refer to [config.go](./config.go) for the config spec.\n- Spans based on span names and resource attributes, all with full regex support\n\nIt takes a pipeline type, of which `logs` `metrics`, and `traces` are supported, followed\nby an action:\n\n- `include`: Any names NOT matching 
filters are excluded from remainder of pipeline\n- `exclude`: Any names matching filters are excluded from remainder of pipeline\n\nFor the actions the following parameters are required:\n\nFor logs:\n\n- `match_type`: `strict`|`regexp`\n- `resource_attributes`: ResourceAttributes defines a list of possible resource\n attributes to match logs against.\n A match occurs if any resource attribute matches all expressions in this given list.\n- `record_attributes`: RecordAttributes defines a list of possible record\n attributes to match logs against.\n A match occurs if any record attribute matches all expressions in this given list.\n- `severity_texts`: SeverityTexts defines a list of possible severity texts to match the logs against.\n A match occurs if the record matches any expression in this given list.\n- `bodies`: Bodies defines a list of possible log bodies to match the logs against.\n A match occurs if the record matches any expression in this given list.\n- `severity_number`: SeverityNumber defines how to match a record based on its SeverityNumber.\n The following can be configured for matching a log record's SeverityNumber:\n - `min`: Min defines the minimum severity with which a log record should match.\n e.g. if this is \"WARN\", all log records with \"WARN\" severity and above (WARN[2-4], ERROR[2-4], FATAL[2-4]) are matched.\n The list of valid severities that may be used for this option can be found [here](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity). You may use either the numerical \"SeverityNumber\" or the \"Short Name\"\n - `match_undefined`: MatchUndefinedSeverity defines whether to match logs with undefined severity or not when using the `min_severity` matching option.\n By default, this is `false`.\n\nFor metrics:\n\n- `match_type`: `strict`|`regexp`|`expr`\n- `metric_names`: (only for a `match_type` of `strict` or `regexp`) list of strings\n or re2 regex patterns\n- `expressions`: (only for a `match_type` of `expr`) list of `expr` expressions\n (see \"Using an `expr` match_type\" below)\n- `resource_attributes`: ResourceAttributes defines a list of possible resource\n attributes to match metrics against.\n A match occurs if any resource attribute matches all expressions in this given list.\n\nThis processor uses [re2 regex][re2_regex] for regex syntax.\n\n[re2_regex]: https://github.com/google/re2/wiki/Syntax\n\nMore details can be found at [include/exclude metrics](../attributesprocessor/README.md#includeexclude-filtering).\n\nExamples:\n\n```yaml\nprocessors:\n filter/1:\n metrics:\n include:\n match_type: regexp\n metric_names:\n - prefix/.*\n - prefix_.*\n resource_attributes:\n - key: container.name\n value: app_container_1\n exclude:\n match_type: strict\n metric_names:\n - hello_world\n - hello/world\n filter/2:\n logs:\n include:\n match_type: strict\n resource_attributes:\n - key: host.name\n value: just_this_one_hostname\n filter/regexp:\n logs:\n include:\n match_type: regexp\n resource_attributes:\n - key: host.name\n value: prefix.*\n filter/regexp_record:\n logs:\n include:\n match_type: regexp\n record_attributes:\n - key: record_attr\n value: prefix_.*\n # Filter on severity text field\n filter/severity_text:\n logs:\n include:\n match_type: regexp\n severity_texts:\n - INFO[2-4]?\n - WARN[2-4]?\n - ERROR[2-4]?\n # Filter out logs below INFO (no DEBUG or TRACE level logs),\n # retaining logs with undefined severity\n filter/severity_number:\n logs:\n include:\n 
severity_number:\n min: \"INFO\"\n match_undefined: true\n filter/bodies:\n logs:\n include:\n match_type: regexp\n bodies:\n - ^IMPORTANT RECORD\n```\n\nRefer to the config files in [testdata](./testdata) for detailed\nexamples on using the processor.\n\n### Using an \"expr\" match_type\n\nIn addition to matching metric names with the `strict` or `regexp` match types, the filter processor\nsupports matching entire `Metric`s using the [expr](https://github.com/antonmedv/expr) expression engine.\n\nThe `expr` filter evaluates the supplied boolean expressions _per datapoint_ on a metric, and returns a result\nfor the entire metric. If any datapoint evaluates to true then the entire metric evaluates to true, otherwise\nfalse.\n\nMade available to the expression environment are the following:\n\n* `MetricName`\n a variable containing the current Metric's name\n* `MetricType`\n a variable containing the current Metric's type: \"Gauge\", \"Sum\", \"Histogram\", \"ExponentialHistogram\" or \"Summary\".\n* `Label(name)`\n a function that takes a label name string as an argument and returns a string: the value of a label with that\n name if one exists, or \"\"\n* `HasLabel(name)`\n a function that takes a label name string as an argument and returns a boolean: true if the datapoint has a label\n with that name, false otherwise\n\nExample:\n\n```yaml\nprocessors:\n filter/1:\n metrics:\n exclude:\n match_type: expr\n expressions:\n - MetricName == \"my.metric\" \u0026\u0026 Label(\"my_label\") == \"abc123\"\n - MetricType == \"Histogram\"\n```\n\nThe above config will filter out any Metric that both has the name \"my.metric\" and has at least one datapoint\nwith a label of 'my_label=\"abc123\"'.\n\n### Support for multiple expressions\n\nAs with `strict` and `regexp`, multiple `expr` expressions are allowed.\n\nFor example, the following two filters have the same effect: they filter out metrics named \"system.cpu.time\" and\n\"system.disk.io\". \n\n```yaml\nprocessors:\n filter/expr:\n metrics:\n exclude:\n match_type: expr\n expressions:\n - MetricName == \"system.cpu.time\"\n - MetricName == \"system.disk.io\"\n filter/strict:\n metrics:\n exclude:\n match_type: strict\n metric_names:\n - system.cpu.time\n - system.disk.io\n```\n\nThe expressions are effectively ORed per datapoint. So for the above `expr` configuration, given a datapoint, if its\nparent Metric's name is \"system.cpu.time\" or \"system.disk.io\" then there's a match. The conditions are tested against\nall the datapoints in a Metric until there's a match, in which case the entire Metric is considered a match, and in\nthe above example the Metric will be excluded. If after testing all the datapoints in a Metric against all the\nexpressions there isn't a match, the entire Metric is considered to be not matching.\n\n\n### Filter metrics using resource attributes\nIn addition to the names, metrics can be filtered using resource attributes. `resource_attributes` takes a list of resource attributes to filter metrics against. \n\nFollowing example will include only the metrics coming from `app_container_1` (the value for `container.name` resource attribute is `app_container_1`). 
\n\n```yaml\nprocessors:\n filter/resource_attributes_include:\n metrics:\n include:\n match_type: strict\n metric_names:\n - hello_world\n - hello/world\n resource_attributes:\n - key: container.name\n value: app_container_1\n```\n\nFollowing example will exclude all the metrics coming from `app_container_1` (the value for `container.name` resource attribute is `app_container_1`). \n\n```yaml\nprocessors:\n filter/resource_attributes_exclude:\n metrics:\n exclude:\n match_type: strict\n metric_names:\n - hello_world\n - hello/world\n resource_attributes:\n - key: container.name\n value: app_container_1\n```\n\nWe can also use `regexp` to filter metrics using resource attributes. Following example will include only the metrics coming from `app_container_1` or `app_container_2` (the value for `container.name` resource attribute is either `app_container_1` or `app_container_2`). \n\n```yaml\nprocessors:\n filter/resource_attributes_regexp:\n metrics:\n include:\n match_type: regexp\n metric_names:\n - hello_world\n - hello/world\n resource_attributes:\n - key: container.name\n value: (app_container_1|app_container_2)\n```\n\nIn case no metric names are provided (`metric_names` being empty), the filtering is only done at resource level.\n\n### Filter Spans from Traces\n\n* This pipeline is able to drop spans and whole traces \n* Note: If this drops a parent span, it does not search out its children, leading to a missing Span in your trace visualization\n\nSee the documentation in the [attribute processor](../attributesprocessor/README.md) for syntax\n\nFor spans, one of Services, SpanNames, Attributes, Resources or Libraries must be specified with a\nnon-empty value for a valid configuration.\n\n```yaml\nprocessors:\n filter/spans:\n spans:\n include:\n match_type: strict\n services:\n - app_3\n exclude:\n match_type: regexp\n services:\n - app_1\n - app_2\n span_names:\n - hello_world\n - hello/world\n attributes:\n - key: container.name\n value: (app_container_1|app_container_2)\n libraries:\n - name: opentelemetry\n version: 0.0-beta\n resources:\n - key: container.host\n value: (localhost|127.0.0.1)\n```\n\n## Warnings\n\nIn general, understand your data before using the filter processor.\n\n- When using the filterprocessor, make sure you understand the look of your incoming data and test the configuration thoroughly. In general, use as specific a configuration as possible to lower the risk of the wrong data being dropped.\n- [Orphaned Telemetry](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#orphaned-telemetry): The processor allows dropping spans. Dropping a span may lead to orphaned spans if the dropped span is a parent. Dropping a span may lead to orphaned logs if the log references the dropped span.","properties":{"error_mode":{"description":"ErrorMode determines how the processor reacts to errors that occur while processing an OTTL condition.\nValid values are `ignore` and `propagate`.\n`ignore` means the processor ignores errors returned by conditions and continues on to the next condition. This is the recommended mode.\n`propagate` means the processor returns the error up the pipeline. 
This will result in the payload being dropped from the collector.\nThe default value is `propagate`.","title":"error_mode","type":"string"},"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogFilters","title":"logs"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.MetricFilters","title":"metrics"},"spans":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchConfig","title":"spans"},"traces":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.TraceFilters","title":"traces"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogFilters":{"additionalProperties":false,"description":"LogFilters filters by Log properties.","properties":{"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogMatchProperties","description":"Exclude match properties describe logs that should be excluded from the Collector Service pipeline,\nall other logs should be included.\nIf both Include and Exclude are specified, Include filtering occurs first.","title":"exclude"},"include":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogMatchProperties","description":"Include match properties describe logs that should be included in the Collector Service pipeline,\nall other logs should be dropped from further processing.\nIf both Include and Exclude are specified, Include filtering occurs first.","title":"include"},"log_record":{"description":"LogConditions is a list of OTTL conditions for an ottllog context.\nIf any condition resolves to true, the log event will be dropped.\nSupports `and`, `or`, and `()`","items":{"type":"string"},"title":"log_record","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogMatchProperties":{"additionalProperties":false,"description":"LogMatchProperties specifies the set of properties in a log to match against and the type of string pattern matching to use.","properties":{"bodies":{"description":"LogBodies is a list of strings that the LogRecord's body field must match\nagainst.","items":{"type":"string"},"title":"bodies","type":"array"},"match_type":{"description":"LogMatchType specifies the type of matching desired","title":"match_type","type":"string"},"record_attributes":{"description":"RecordAttributes defines a list of possible record attributes to match logs against.\nA match occurs if any record attribute matches at least one expression in this given list.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.Attribute"},"title":"record_attributes","type":"array"},"resource_attributes":{"description":"ResourceAttributes defines a list of possible resource attributes to match logs against.\nA match occurs if any resource attribute matches all expressions in this given list.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.Attribute"},"title":"resource_attributes","type":"array"},"severity_number":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogSeverityNumberMatchProperties","description":"SeverityNumberProperties defines how to match against a log record's 
SeverityNumber, if defined.","title":"severity_number"},"severity_texts":{"description":"SeverityTexts is a list of strings that the LogRecord's severity text field must match\nagainst.","items":{"type":"string"},"title":"severity_texts","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.LogSeverityNumberMatchProperties":{"additionalProperties":false,"properties":{"match_undefined":{"description":"MatchUndefined lets logs records with \"unknown\" severity match.\nIf MinSeverity is not set, this field is ignored, as fields are not matched based on severity.","title":"match_undefined","type":"boolean"},"min":{"description":"Min is the minimum severity needed for the log record to match.\nThis corresponds to the short names specified here:\nhttps://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity\nthis field is case-insensitive (\"INFO\" == \"info\")","title":"min","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.MetricFilters":{"additionalProperties":false,"description":"MetricFilters filters by Metric properties.","properties":{"datapoint":{"description":"DataPointConditions is a list of OTTL conditions for an ottldatapoint context.\nIf any condition resolves to true, the datapoint will be dropped.\nSupports `and`, `or`, and `()`","items":{"type":"string"},"title":"datapoint","type":"array"},"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MetricMatchProperties","description":"Exclude match properties describe metrics that should be excluded from the Collector Service pipeline,\nall other metrics should be included.\nIf both Include and Exclude are specified, Include filtering occurs first.","title":"exclude"},"include":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MetricMatchProperties","description":"Include match properties describe metrics that should be included in the Collector Service pipeline,\nall other metrics should be dropped from further processing.\nIf both Include and Exclude are specified, Include filtering occurs first.","title":"include"},"metric":{"description":"MetricConditions is a list of OTTL conditions for an ottlmetric context.\nIf any condition resolves to true, the metric will be dropped.\nSupports `and`, `or`, and `()`","items":{"type":"string"},"title":"metric","type":"array"},"regexp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterset.regexp.Config","description":"RegexpConfig specifies options for the Regexp match type","title":"regexp"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.TraceFilters":{"additionalProperties":false,"description":"TraceFilters filters by OTTL conditions","properties":{"span":{"description":"SpanConditions is a list of OTTL conditions for an ottlspan context.\nIf any condition resolves to true, the span will be dropped.\nSupports `and`, `or`, and `()`","items":{"type":"string"},"title":"span","type":"array"},"spanevent":{"description":"SpanEventConditions is a list of OTTL conditions for an ottlspanevent context.\nIf any condition resolves to true, the span event will be dropped.\nSupports `and`, `or`, and 
`()`","items":{"type":"string"},"title":"spanevent","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.groupbyattrsprocessor.Config":{"additionalProperties":false,"description":"Config is the configuration for the processor.","markdownDescription":"# Group by Attributes processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Description\n\nThis processor re-associates spans, log records and metric datapoints to a *Resource* that matches with the specified attributes. As a result, all spans, log records or metric datapoints with the same values for the specified attributes are \"grouped\" under the same *Resource*.\n\nTypical use cases:\n\n* extract resources from \"flat\" data formats, such as Fluentbit logs or Prometheus metrics\n* associate Prometheus metrics to a *Resource* that describes the relevant host, based on label present on all metrics\n* optimize data packaging by extracting common attributes\n* [compacting](#compaction) multiple records that share the same Resource and InstrumentationLibrary attributes but are under multiple ResourceSpans/ResourceMetrics/ResourceLogs, into a single ResourceSpans/ResourceMetrics/ResourceLogs (when empty list of keys is being provided). This might happen e.g. when [groupbytrace](../groupbytraceprocessor) processor is being used or data comes in multiple requests. By compacting data, it takes less memory, is more efficiently processed, serialized and the number of export requests is reduced (e.g. 
in case of [jaeger](../../exporter/jaegerexporter) exporter).\n\nIt is recommended to use the `groupbyattrs` processor together with [batch](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor) processor, as a consecutive step, as this will reduce the fragmentation of data (by grouping records together under matching Resource/Instrumentation Library)\n\n## Examples\n\n### Grouping metrics\n\nConsider the below metrics, all originally associated to the same *Resource*:\n\n```go\nResource {host.name=\"localhost\",source=\"prom\"}\n Metric \"gauge-1\" (GAUGE)\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-B\",id=\"eth0\"}\n Metric \"gauge-1\" (GAUGE) // Identical to previous Metric\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-B\",id=\"eth0\"}\n Metric \"mixed-type\" (GAUGE)\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-B\",id=\"eth0\"}\n Metric \"mixed-type\" (SUM)\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n DataPoint {host.name=\"host-A\",id=\"eth0\"}\n Metric \"dont-move\" (Gauge)\n DataPoint {id=\"eth0\"}\n```\n\nWith the below configuration, the **groupbyattrs** will re-associate the metrics with either `host-A` or `host-B`, based on the value of the `host.name` attribute.\n\n```yaml\nprocessors:\n groupbyattrs:\n keys:\n - host.name\n```\n\nThe output of the processor will therefore be:\n\n```go\nResource {host.name=\"localhost\",source=\"prom\"}\n Metric \"dont-move\" (Gauge)\n DataPoint {id=\"eth0\"}\n\nResource {host.name=\"host-A\",source=\"prom\"}\n Metric \"gauge-1\"\n DataPoint {id=\"eth0\"}\n DataPoint {id=\"eth0\"}\n DataPoint {id=\"eth0\"}\n DataPoint {id=\"eth0\"}\n Metric \"mixed-type\" (GAUGE)\n DataPoint {id=\"eth0\"}\n DataPoint {id=\"eth0\"}\n Metric \"mixed-type\" (SUM)\n DataPoint {id=\"eth0\"}\n DataPoint {id=\"eth0\"}\n\nResource {host.name=\"host-B\",source=\"prom\"}\n Metric \"gauge-1\"\n DataPoint {id=\"eth0\"}\n DataPoint {id=\"eth0\"}\n Metric \"mixed-type\" (GAUGE)\n DataPoint {id=\"eth0\"}\n```\n\nNotes:\n\n* The *DataPoints* for the `gauge-1` (GAUGE) metric were originally split under 2 *Metric* instances and have been merged in the output\n* The *DataPoints* of the `mixed-type` (GAUGE) and `mixed-type` (SUM) metrics have not been merged under the same *Metric*, because their *DataType* is different\n* The `dont-move` metric *DataPoints* don't have a `host.name` attribute and therefore remained under the original *Resource*\n* The new *Resources* inherited the attributes from the original *Resource* (`source=\"prom\"`), **plus** the specified attributes from the processed metrics (`host.name=\"host-A\"` or `host.name=\"host-B\"`)\n* The specified \"grouping\" attributes that are set on the new *Resources* are also **removed** from the metric *DataPoints*\n* While not shown in the above example, the processor also merges collections of records under matching InstrumentationLibrary\n\n### Compaction\n\nIn some cases, the data might come in single requests to the collector or become fragmented due to use of [groupbytrace](../groupbytraceprocessor) processor. 
Even after batching there might be multiple duplicated ResourceSpans/ResourceLogs/ResourceMetrics objects, which leads to additional memory consumption, increased processing costs, inefficient serialization and increase of the export requests. As a remedy, `groupbyattrs` processor might be used to compact the data with matching Resource and InstrumentationLibrary properties.\n\nFor example, consider the following input:\n\n```go\nResource {host.name=\"localhost\"}\n InstumentationLibrary {name=\"MyLibrary\"}\n Spans\n Span {span_id=1, ...}\n InstumentationLibrary {name=\"OtherLibrary\"}\n Spans\n Span {span_id=2, ...}\n \nResource {host.name=\"localhost\"}\n InstumentationLibrary {name=\"MyLibrary\"}\n Spans\n Span {span_id=3, ...}\n \nResource {host.name=\"localhost\"}\n InstumentationLibrary {name=\"MyLibrary\"}\n Spans\n Span {span_id=4, ...}\n \nResource {host.name=\"otherhost\"}\n InstumentationLibrary {name=\"MyLibrary\"}\n Spans\n Span {span_id=5, ...}\n```\n\nWith the below configuration, the **groupbyattrs** will re-associate the spans with matching Resource and InstrumentationLibrary. \n\n```yaml\nprocessors:\n batch:\n groupbyattrs:\n\npipelines:\n traces:\n processors: [batch, groupbyattrs/grouping]\n ...\n```\n\nThe output of the processor will therefore be:\n\n```go\nResource {host.name=\"localhost\"}\n InstumentationLibrary {name=\"MyLibrary\"}\n Spans\n Span {span_id=1, ...}\n Span {span_id=3, ...}\n Span {span_id=4, ...}\n InstumentationLibrary {name=\"OtherLibrary\"}\n Spans\n Span {span_id=2, ...}\n\nResource {host.name=\"otherhost\"}\n InstumentationLibrary {name=\"MyLibrary\"}\n Spans\n Span {span_id=5, ...}\n```\n\n## Configuration\n\nThe configuration is very simple, as you only need to specify an array of attribute keys that will be used to \"group\" spans, log records or metric data points together, as in the below example:\n\n```yaml\nprocessors:\n groupbyattrs:\n keys:\n - foo\n - bar\n```\n\nThe `keys` property describes which attribute keys will be considered for grouping:\n\n* If the processed span, log record and metric data point has at least one of the specified attributes key, it will be moved to a *Resource* with the same value for these attributes. 
The *Resource* will be created if none exists with the same attributes.\n* If none of the specified attributes key is present in the processed span, log record or metric data point, it remains associated to the same *Resource* (no change).\n\nPlease refer to:\n\n* [config.go](./config.go) for the config spec\n* [config.yaml](./testdata/config.yaml) for detailed examples on using the processor\n\n## Internal Metrics\n\nThe following internal metrics are recorded by this processor:\n\n| Metric | Description |\n| ------------------------- | -------------------------------------------------------- |\n| `num_grouped_spans` | the number of spans that had attributes grouped |\n| `num_non_grouped_spans` | the number of spans that did not have attributes grouped |\n| `span_groups` | distribution of groups extracted for spans |\n| `num_grouped_logs` | number of logs that had attributes grouped |\n| `num_non_grouped_logs` | number of logs that did not have attributes grouped |\n| `log_groups` | distribution of groups extracted for logs |\n| `num_grouped_metrics` | number of metrics that had attributes grouped |\n| `num_non_grouped_metrics` | number of metrics that did not have attributes grouped |\n| `metric_groups` | distribution of groups extracted for metrics |","properties":{"keys":{"description":"GroupByKeys describes the attribute names that are going to be used for grouping.\nEmpty value is allowed, since processor in such case can compact data","items":{"type":"string"},"title":"keys","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.groupbytraceprocessor.Config":{"additionalProperties":false,"description":"Config is the configuration for the processor.","markdownDescription":"# Group by Trace processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [aws], [observiq], [sumo] |\n| Warnings | [Statefulness](#warnings) |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis processor collects all the spans from the same trace, waiting a \npre-determined amount of time before releasing the trace to the next processor.\nThe expectation is that, generally, traces will be complete after the given time.\n\nThis processor should be used whenever a processor requires grouped traces to make decisions,\nsuch as a tail-based sampler or a per-trace metrics processor.\n\nThe batch processor shouldn't be used before this processor, as this one will \nprobably undo part (or much) of the work that the batch processor performs. 
It's\nfine to have the batch processor run right after this one, and every entry in the\nbatch will be a complete trace.\n\nPlease refer to [config.go](./config.go) for the config spec.\n\nExamples:\n\n```yaml\nprocessors:\n groupbytrace:\n groupbytrace/2:\n wait_duration: 10s\n num_traces: 1000\n num_workers: 2\n```\n\n## Configuration\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed examples on using the processor.\n\nThe `num_traces` (default=1,000,000) property tells the processor the maximum number of traces to keep in the internal storage. A higher `num_traces` might incur higher memory usage.\n\nThe `wait_duration` (default=1s) property tells the processor for how long it should keep traces in the internal storage. Once a trace is kept for this duration, it's then released to the next consumer and removed from the internal storage. Spans from a trace that has been released will be kept for the entire duration again.\n\nThe `num_workers` (default=1) property controls how many concurrent workers the processor will use to process traces. If you are looking to optimize this value\nthen using GOMAXPROCS could be considered as a starting point. \n\n## Metrics\n\nThe following metrics are recorded by this processor:\n\n* `otelcol_processor_groupbytrace_conf_num_traces` represents the maximum number of traces that can be kept by the internal storage. This value comes from the processor's configuration and will never change over the lifecycle of the processor.\n* `otelcol_processor_groupbytrace_event_latency_bucket`, with the following `event` tag values:\n * `onTraceReceived` represents the number of traces' parts the processor has received from the previous components\n * `onTraceExpired` represents the number of traces that finished waiting in memory for spans to arrive\n * `onTraceReleased` represents the number of traces that have been marked as released to the next component\n * `onTraceRemoved` represents the number of traces that have been marked for removal from the internal storage\n* `otelcol_processor_groupbytrace_num_events_in_queue` representing the state of the internal queue. Ideally, this number would be close to zero, but might have temporary spikes if the storage is slow.\n* `otelcol_processor_groupbytrace_num_traces_in_memory` representing the state of the internal trace storage, waiting for spans to arrive. It's common to have items in memory all the time if the processor has a continuous flow of data. The longer the `wait_duration`, the higher the number of traces in memory should be, given enough traffic.\n* `otelcol_processor_groupbytrace_spans_released` and `otelcol_processor_groupbytrace_traces_released` represent the number of spans and traces effectively released to the next component.\n* `otelcol_processor_groupbytrace_traces_evicted` represents the number of traces that have been evicted from the internal storage due to capacity problems. Ideally, this should be zero, or very close to zero at all times. If you keep getting items evicted, increase the `num_traces`.\n* `otelcol_processor_groupbytrace_incomplete_releases` represents the traces that have been marked as expired, but had previously been removed. This might be the case when a span from a trace has been received in a batch while the trace existed in the in-memory storage, but has since been released/removed before the span could be added to the trace. 
This should always be very close to 0, and a high value might indicate a software bug.\n\nA healthy system would have the same value for the metric `otelcol_processor_groupbytrace_spans_released` and for three events under `otelcol_processor_groupbytrace_event_latency_bucket`: `onTraceExpired`, `onTraceRemoved` and `onTraceReleased`.\n\nThe metric `otelcol_processor_groupbytrace_event_latency_bucket` is a bucket and shows how long each event took to be processed in milliseconds. In most cases, it should take less than 5ms for an event to be processed, but it might be the case where an event could take 10ms. Higher latencies are possible, but it should never really reach the last item, representing 1s. Events taking more than 1s are killed automatically, and if you have multiple items in this bucket, it might indicate a bug in the software.\n\nMost metrics are updated when the events occur, except for the following ones, which are updated periodically:\n* `otelcol_processor_groupbytrace_num_events_in_queue`\n* `otelcol_processor_groupbytrace_num_traces_in_memory`\n\n## Warnings\n\n- [Statefulness](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#statefulness): The groupbytrace processor works best when all spans for a trace are sent to the same collector instance.","properties":{"discard_orphans":{"description":"DiscardOrphans instructs the processor to discard traces without the root span.\nThis typically indicates that the trace is incomplete.\nDefault: false.\nNot yet implemented, and an error will be returned when this option is used.","title":"discard_orphans","type":"boolean"},"num_traces":{"description":"NumTraces is the max number of traces to keep in memory waiting for the duration.\nDefault: 1_000_000.","title":"num_traces","type":"integer"},"num_workers":{"description":"NumWorkers is the number of workers processing the event queue.\nDefault: 1.","title":"num_workers","type":"integer"},"store_on_disk":{"description":"StoreOnDisk tells the processor to keep only the trace ID in memory, serializing the trace spans to disk.\nUseful when the duration to wait for traces to complete is high.\nDefault: false.\nNot yet implemented, and an error will be returned when this option is used.","title":"store_on_disk","type":"boolean"},"wait_duration":{"description":"WaitDuration tells the processor to wait for the specified duration for the trace to be complete.\nDefault: 1s.","title":"wait_duration","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for k8s attributes processor.","markdownDescription":"# Kubernetes Attributes Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: logs, metrics, traces |\n| Distributions | [contrib], [observiq], [redhat], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nKubernetes attributes processor allows automatic setting 
of spans, metrics and logs resource attributes with k8s metadata.\n\nThe processor automatically discovers k8s resources (pods), extracts metadata from them and adds the extracted metadata\nto the relevant spans, metrics and logs as resource attributes. The processor uses the kubernetes API to discover all pods\nrunning in a cluster, keeps a record of their IP addresses, pod UIDs and interesting metadata.\nThe rules for associating the data passing through the processor (spans, metrics and logs) with specific Pod Metadata are configured via the \"pod_association\" key.\nIt represents a list of associations that are executed in the specified order until the first one is able to do the match.\n\n\n## Configuration\n\nThe processor stores the list of running pods and the associated metadata. When it sees a datapoint (log, trace or metric), it will try to associate the datapoint\nto the pod from where the datapoint originated, so we can add the relevant pod metadata to the datapoint. By default, it associates the incoming connection IP\nto the Pod IP. But for cases where this approach doesn't work (sending through a proxy, etc.), a custom association rule can be specified.\n\nEach association is specified as a list of sources of associations. A source is a rule that matches metadata from the datapoint to pod metadata.\nIn order to get an association applied, all the sources specified need to match.\n\nEach source rule is specified as a pair of `from` (representing the rule type) and `name` (representing the attribute name if `from` is set to `resource_attribute`).\nThe following rule types are available:\n\n**from: \"connection\"** - takes the IP attribute from connection context (if available)\n**from: \"resource_attribute\"** - allows specifying the attribute name to look up in the list of attributes of the received Resource.\n Semantic convention should be used for naming.\n\nPod association configuration.\n\n```yaml\npod_association:\n # below association takes a look at the datapoint's k8s.pod.ip resource attribute and tries to match it with\n # the pod having the same attribute.\n - sources:\n - from: resource_attribute\n name: k8s.pod.ip\n # below association matches for pair `k8s.pod.name` and `k8s.namespace.name`\n - sources:\n - from: resource_attribute\n name: k8s.pod.name\n - from: resource_attribute\n name: k8s.namespace.name\n```\n\nIf Pod association rules are not configured, resources are associated with metadata only by the connection's IP address.\n\nWhich metadata to collect is determined by the `metadata` configuration that defines the list of resource attributes\nto be added. Items in the list are called exactly the same as the resource attributes that will be added.\nThe following attributes are added by default: \n - k8s.namespace.name\n - k8s.pod.name\n - k8s.pod.uid\n - k8s.pod.start_time\n - k8s.deployment.name\n - k8s.node.name\n\nYou can change this list with the `metadata` configuration.\n\nNot all the attributes are guaranteed to be added. Only attribute names from `metadata` should be used for \npod_association's `resource_attribute`, because empty or non-existing values will be ignored.\n\nAdditional container-level attributes can be extracted if certain resource attributes are provided:\n\n1. If the `container.id` resource attribute is provided, the following additional attributes will be available:\n - k8s.container.name\n - container.image.name\n - container.image.tag\n2. 
If the `k8s.container.name` resource attribute is provided, the following additional attributes will be available:\n - container.image.name\n - container.image.tag\n3. If the `k8s.container.restart_count` resource attribute is provided, it can be used to associate with a particular container\n instance. If it's not set, the latest container instance will be used:\n - container.id (not added by default, has to be specified in `metadata`)\n\nThe k8sattributesprocessor can also set resource attributes from k8s labels and annotations of pods and namespaces.\nThe config for associating the data passing through the processor (spans, metrics and logs) with specific Pod/Namespace annotations/labels is configured via \"annotations\" and \"labels\" keys.\nThis config represents a list of annotations/labels that are extracted from pods/namespaces and added to spans, metrics and logs.\nEach item is specified as a config of tag_name (representing the tag name to tag the spans with),\nkey (representing the key used to extract value) and from (representing the kubernetes object used to extract the value).\nThe \"from\" field has only two possible values \"pod\" and \"namespace\" and defaults to \"pod\" if none is specified.\n\nA few examples to use this config are as follows:\n\n```yaml\nannotations:\n - tag_name: a1 # extracts value of annotation from pods with key `annotation-one` and inserts it as a tag with key `a1`\n key: annotation-one\n from: pod\n - tag_name: a2 # extracts value of annotation from namespaces with key `annotation-two` with regexp and inserts it as a tag with key `a2`\n key: annotation-two\n regex: field=(?P\u003cvalue\u003e.+)\n from: namespace\n\nlabels:\n - tag_name: l1 # extracts value of label from namespaces with key `label1` and inserts it as a tag with key `l1`\n key: label1\n from: namespace\n - tag_name: l2 # extracts value of label from pods with key `label2` with regexp and inserts it as a tag with key `l2`\n key: label2\n regex: field=(?P\u003cvalue\u003e.+)\n from: pod\n```\n\n### Config example\n\n```yaml\nk8sattributes:\nk8sattributes/2:\n auth_type: \"serviceAccount\"\n passthrough: false\n filter:\n node_from_env_var: KUBE_NODE_NAME\n extract:\n metadata:\n - k8s.pod.name\n - k8s.pod.uid\n - k8s.deployment.name\n - k8s.namespace.name\n - k8s.node.name\n - k8s.pod.start_time\n pod_association:\n - sources:\n - from: resource_attribute\n name: k8s.pod.ip\n - sources:\n - from: resource_attribute\n name: k8s.pod.uid\n - sources:\n - from: connection\n```\n\n## Role-based access control\n\nThe k8sattributesprocessor needs `get`, `watch` and `list` permissions on both `pods` and `namespaces` resources, for all namespaces and pods included in the configured filters. 
Additionally, when using `k8s.deployment.uid` or `k8s.deployment.name`, the processor also needs `get`, `watch` and `list` permissions for `replicaset` resources.\n\nHere is an example of a `ClusterRole` to give a `ServiceAccount` the necessary permissions for all pods and namespaces in the cluster (replace `\u003cOTEL_COL_NAMESPACE\u003e` with the namespace where the collector is deployed):\n\n```yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: collector\n namespace: \u003cOTEL_COL_NAMESPACE\u003e\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: otel-collector\nrules:\n- apiGroups: [\"\"]\n resources: [\"pods\", \"namespaces\"]\n verbs: [\"get\", \"watch\", \"list\"]\n- apiGroups: [\"apps\"]\n resources: [\"replicasets\"]\n verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"extensions\"]\n resources: [\"replicasets\"]\n verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: otel-collector\nsubjects:\n- kind: ServiceAccount\n name: collector\n namespace: \u003cOTEL_COL_NAMESPACE\u003e\nroleRef:\n kind: ClusterRole\n name: otel-collector\n apiGroup: rbac.authorization.k8s.io\n```\n\n## Deployment scenarios\n\nThe processor can be used in collectors deployed either as an agent (Kubernetes DaemonSet) or as a gateway (Kubernetes Deployment).\n\n### As an agent\n\nWhen running as an agent, the processor detects IP addresses of pods sending spans, metrics or logs to the agent\nand uses this information to extract metadata from pods. When running as an agent, it is important to apply\na discovery filter so that the processor only discovers pods from the same host that it is running on. Not using\nsuch a filter can result in unnecessary resource usage, especially on very large clusters. Once the filter is applied,\neach processor will only query the k8s API for pods running on its own node.\n\nA node filter can be applied by setting the `filter.node` config option to the name of a k8s node. While this works\nas expected, it cannot be used to automatically filter pods by the same node that the processor is running on in\nmost cases as it is not known beforehand which node a pod will be scheduled on. Luckily, Kubernetes has a solution\nfor this called the downward API. To automatically filter pods by the node the processor is running on, you'll need\nto complete the following steps:\n\n1. Use the downward API to inject the node name as an environment variable.\nAdd the following snippet under the pod env section of the OpenTelemetry container.\n\n```yaml\nspec:\n containers:\n - env:\n - name: KUBE_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n```\n\nThis will inject a new environment variable into the OpenTelemetry container with the value set to the\nname of the node the pod was scheduled to run on.\n\n2. 
Set \"filter.node_from_env_var\" to the name of the environment variable holding the node name.\n\n```yaml\nk8sattributes:\n filter:\n node_from_env_var: KUBE_NODE_NAME # this should be the same as the var name used in the previous step\n```\n\nThis will restrict each OpenTelemetry agent to query only pods running on the same node, dramatically reducing\nresource requirements for very large clusters.\n\n### As a gateway\n\nWhen running as a gateway, the processor cannot correctly detect the IP address of the pods generating\nthe telemetry data without any of the well-known IP attributes, when it receives them\nfrom an agent instead of receiving them directly from the pods. To\nwork around this issue, agents deployed with the k8sattributes processor can be configured to detect\nthe IP addresses and forward them along with the telemetry data resources. The collector can then match this IP address\nwith k8s pods and enrich the records with the metadata. In order to set this up, you'll need to complete the\nfollowing steps:\n\n1. Set up agents in passthrough mode.\nConfigure the agents' k8sattributes processors to run in passthrough mode.\n\n```yaml\n# k8sattributes config for agent\nk8sattributes:\n passthrough: true\n```\n\nThis will ensure that the agents detect the IP address and add it as an attribute to all telemetry resources.\nAgents will not make any k8s API calls, do any discovery of pods, or extract any metadata.\n\n2. Configure the collector as usual.\nNo special configuration changes need to be made on the collector. It'll automatically detect\nthe IP address of spans, logs and metrics sent by the agents as well as directly by other services/pods.\n\n## Caveats\n\nThere are some edge-cases and scenarios where k8sattributes will not work properly.\n\n### Host networking mode\n\nThe processor cannot correctly identify pods running in host network mode, and\nenriching telemetry data generated by such pods is not supported at the moment, unless the association\nrule is not based on the IP attribute.\n\n### As a sidecar\n\nThe processor does not support detecting containers from the same pods when running\nas a sidecar. While this can be done, we think it is simpler to just use the kubernetes\ndownward API to inject environment variables into the pods and directly use their values\nas tags.","properties":{"auth_type":{"description":"How to authenticate to the K8s API server. This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"},"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.ExcludeConfig","description":"Exclude section allows defining names of pods that should be\nignored while tagging.","title":"exclude"},"extract":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.ExtractConfig","description":"Extract section allows specifying extraction rules to extract\ndata from k8s pod specs","title":"extract"},"filter":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FilterConfig","description":"Filter section allows specifying filters to filter\npods by labels, fields, namespaces, nodes, etc.","title":"filter"},"passthrough":{"description":"Passthrough mode only annotates resources with the pod IP and\ndoes not try to extract any other metadata. 
It does not need\naccess to the K8S cluster API. Agent/Collector must receive spans\ndirectly from services to be able to correctly detect the pod IPs.","title":"passthrough","type":"boolean"},"pod_association":{"description":"Association section allows to define rules for tagging spans, metrics,\nand logs with Pod metadata.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.PodAssociationConfig"},"title":"pod_association","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.ExcludeConfig":{"additionalProperties":false,"description":"ExcludeConfig represent a list of Pods to exclude","properties":{"pods":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.ExcludePodConfig"},"title":"pods","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.ExcludePodConfig":{"additionalProperties":false,"description":"ExcludePodConfig represent a Pod name to ignore","properties":{"name":{"title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.ExtractConfig":{"additionalProperties":false,"description":"ExtractConfig section allows specifying extraction rules to extract data from k8s pod specs.","properties":{"annotations":{"description":"Annotations allows extracting data from pod annotations and record it\nas resource attributes.\nIt is a list of FieldExtractConfig type. See FieldExtractConfig\ndocumentation for more details.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FieldExtractConfig"},"title":"annotations","type":"array"},"labels":{"description":"Labels allows extracting data from pod labels and record it\nas resource attributes.\nIt is a list of FieldExtractConfig type. 
See FieldExtractConfig\ndocumentation for more details.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FieldExtractConfig"},"title":"labels","type":"array"},"metadata":{"description":"Metadata allows to extract pod/namespace metadata from a list of metadata fields.\nThe field accepts a list of strings.\n\nMetadata fields supported right now are,\n k8s.pod.name, k8s.pod.uid, k8s.deployment.name,\n k8s.node.name, k8s.namespace.name, k8s.pod.start_time,\n k8s.replicaset.name, k8s.replicaset.uid,\n k8s.daemonset.name, k8s.daemonset.uid,\n k8s.job.name, k8s.job.uid, k8s.cronjob.name,\n k8s.statefulset.name, k8s.statefulset.uid,\n k8s.container.name, container.image.name,\n container.image.tag, container.id\n\nSpecifying anything other than these values will result in an error.\nBy default, the following fields are extracted and added to spans, metrics and logs as attributes:\n - k8s.pod.name\n - k8s.pod.uid\n - k8s.pod.start_time\n - k8s.namespace.name\n - k8s.node.name\n - k8s.deployment.name (if the pod is controlled by a deployment)\n - k8s.container.name (requires an additional attribute to be set: container.id)\n - container.image.name (requires one of the following additional attributes to be set: container.id or k8s.container.name)\n - container.image.tag (requires one of the following additional attributes to be set: container.id or k8s.container.name)","items":{"type":"string"},"title":"metadata","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FieldExtractConfig":{"additionalProperties":false,"description":"FieldExtractConfig allows specifying an extraction rule to extract a resource attribute from pod (or namespace) annotations (or labels).","properties":{"from":{"description":"From represents the source of the labels/annotations.\nAllowed values are \"pod\" and \"namespace\". The default is pod.","title":"from","type":"string"},"key":{"description":"Key represents the annotation (or label) name. This must exactly match an annotation (or label) name.","title":"key","type":"string"},"key_regex":{"description":"KeyRegex is a regular expression used to extract a Key that matches the regex.\nOut of Key or KeyRegex, only one option is expected to be configured at a time.","title":"key_regex","type":"string"},"regex":{"description":"Regex is an optional field used to extract a sub-string from a complex field value.\nThe supplied regular expression must contain one named parameter with the string \"value\"\nas the name. 
For example, if your pod spec contains the following annotation,\n\nkubernetes.io/change-cause: 2019-08-28T18:34:33Z APP_NAME=my-app GIT_SHA=58a1e39 CI_BUILD=4120\n\nand you'd like to extract the GIT_SHA and the CI_BUILD values as tags, then you must\nspecify the following two extraction rules:\n\nextract:\n annotations:\n - tag_name: git.sha\n key: kubernetes.io/change-cause\n regex: GIT_SHA=(?P\u003cvalue\u003e\\w+)\n - tag_name: ci.build\n key: kubernetes.io/change-cause\n regex: JENKINS=(?P\u003cvalue\u003e[\\w]+)\n\nthis will add the `git.sha` and `ci.build` resource attributes.","title":"regex","type":"string"},"tag_name":{"description":"TagName represents the name of the resource attribute that will be added to logs, metrics or spans.\nWhen not specified, a default tag name will be used of the format:\n - k8s.pod.annotations.\u003cannotation key\u003e\n - k8s.pod.labels.\u003clabel key\u003e\nFor example, if tag_name is not specified and the key is git_sha,\nthen the attribute name will be `k8s.pod.annotations.git_sha`.\nWhen key_regex is present, tag_name supports back reference to both named capturing and positioned capturing.\nFor example, if your pod spec contains the following labels,\n\napp.kubernetes.io/component: mysql\napp.kubernetes.io/version: 5.7.21\n\nand you'd like to add tags for all labels with prefix app.kubernetes.io/ and also trim the prefix,\nthen you can specify the following extraction rules:\n\nextract:\n labels:\n - tag_name: $$1\n key_regex: kubernetes.io/(.*)\n\nthis will add the `component` and `version` tags to the spans or metrics.","title":"tag_name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FieldFilterConfig":{"additionalProperties":false,"description":"FieldFilterConfig allows specifying exactly one filter by a field.","properties":{"key":{"description":"Key represents the key or name of the field or labels that a filter\ncan apply on.","title":"key","type":"string"},"op":{"description":"Op represents the filter operation to apply on the given\nKey: Value pair. The following operations are supported\n equals, not-equals, exists, does-not-exist.","title":"op","type":"string"},"value":{"description":"Value represents the value associated with the key that a filter\noperation specified by the `Op` field applies on.","title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FilterConfig":{"additionalProperties":false,"description":"FilterConfig section allows specifying filters to filter pods by labels, fields, namespaces, nodes, etc.","properties":{"fields":{"description":"Fields allows to filter pods by generic k8s fields.\nOnly the following operations are supported:\n - equals\n - not-equals\n\nCheck FieldFilterConfig for more details.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FieldFilterConfig"},"title":"fields","type":"array"},"labels":{"description":"Labels allows to filter pods by generic k8s pod labels.\nOnly the following operations are supported:\n - equals\n - not-equals\n - exists\n - not-exists\n\nCheck FieldFilterConfig for more details.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.FieldFilterConfig"},"title":"labels","type":"array"},"namespace":{"description":"Namespace filters all pods by the provided namespace. 
All other pods are ignored.","title":"namespace","type":"string"},"node":{"description":"Node represents a k8s node or host. If specified, any pods not running\non the specified node will be ignored by the tagger.","title":"node","type":"string"},"node_from_env_var":{"description":"NodeFromEnv can be used to extract the node name from an environment\nvariable. The value must be the name of the environment variable.\nThis is useful when the node an OTel agent will run on cannot be\npredicted. In such cases, the Kubernetes downward API can be used to\nadd the node name to each pod as an environment variable. K8s tagger\ncan then read this value and filter pods by it.\n\nFor example, the node name can be passed to each agent with the downward API as follows\n\nenv:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n\nThen the NodeFromEnv field can be set to `K8S_NODE_NAME` to filter all pods by the node that\nthe agent is running on.\n\nMore on the downward API here: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/","title":"node_from_env_var","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.PodAssociationConfig":{"additionalProperties":false,"description":"PodAssociationConfig contains a single rule for how to associate Pod metadata with logs, spans and metrics","properties":{"sources":{"description":"List of pod association sources which should be used\nto identify the pod","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.PodAssociationSourceConfig"},"title":"sources","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.PodAssociationSourceConfig":{"additionalProperties":false,"properties":{"from":{"description":"From represents the source of the association.\nAllowed values are \"connection\" and \"resource_attribute\".","title":"from","type":"string"},"name":{"description":"Name represents the extracted key name.\ne.g. ip, pod_uid, k8s.pod.ip","title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricsgenerationprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration for the processor.","markdownDescription":"# Metrics Generation Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [aws], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n**Status: under development; not recommended for production usage.**\n\n## Description\n\nThe metrics generation processor (`experimental_metricsgenerationprocessor`) can be used to create new metrics using existing metrics following a given rule. Currently it supports the following two approaches for creating a new metric.\n\n1. It can create a new metric from two existing metrics by applying one of the following arithmetic operations: add, subtract, multiply, divide and percent. 
One use case is to calculate the `pod.memory.utilization` metric like the following equation-\n`pod.memory.utilization` = (`pod.memory.usage.bytes` / `node.memory.limit`)\n1. It can create a new metric by scaling the value of an existing metric with a given constant number. One use case is to convert `pod.memory.usage` metric values from Megabytes to Bytes (multiply the existing metric's value by 1,048,576)\n\n## Configuration\n\nConfiguration is specified through a list of generation rules. Generation rules find the metrics which \nmatch the given metric names and apply the specified operation to those metrics.\n\n```yaml\nprocessors:\n # processor name: experimental_metricsgeneration\n experimental_metricsgeneration:\n\n # specify the metric generation rules\n rules:\n # Name of the new metric. This is a required field.\n - name: \u003cnew_metric_name\u003e\n\n # Unit for the new metric being generated.\n unit: \u003cnew_metric_unit\u003e\n\n # type describes how the new metric will be generated. It can be one of `calculate` or `scale`. calculate generates a metric applying the given operation on two operand metrics. scale operates only on operand1 metric to generate the new metric.\n type: {calculate, scale}\n\n # This is a required field.\n metric1: \u003cfirst_operand_metric\u003e\n\n # This field is required only if the type is \"calculate\".\n metric2: \u003csecond_operand_metric\u003e\n\n # Operation specifies which arithmetic operation to apply. It must be one of the five supported operations.\n operation: {add, subtract, multiply, divide, percent}\n```\n\n## Example Configurations\n\n### Create a new metric using two existing metrics\n```yaml\n# create pod.cpu.utilized following (pod.cpu.usage / node.cpu.limit)\nrules:\n - name: pod.cpu.utilized\n type: calculate\n metric1: pod.cpu.usage\n metric2: node.cpu.limit\n operation: divide\n```\n\n### Create a new metric scaling the value of an existing metric\n```yaml\n# create pod.memory.usage.bytes from pod.memory.usage.megabytes\nrules:\n - name: pod.memory.usage.bytes\n unit: Bytes\n type: scale\n metric1: pod.memory.usage.megabytes\n operation: multiply\n scale_by: 1048576\n```","properties":{"rules":{"description":"Set of rules for generating new metrics","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricsgenerationprocessor.Rule"},"title":"rules","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricsgenerationprocessor.Rule":{"additionalProperties":false,"properties":{"metric1":{"description":"First operand metric to use in the calculation. This is a required field.","title":"metric1","type":"string"},"metric2":{"description":"Second operand metric to use in the calculation. A required field if the type is calculate.","title":"metric2","type":"string"},"name":{"description":"Name of the new metric being generated. This is a required field.","title":"name","type":"string"},"operation":{"description":"The arithmetic operation to apply for the calculation. This is a required field.","title":"operation","type":"string"},"scale_by":{"description":"A constant number by which the first operand will be scaled. A required field if the type is scale.","title":"scale_by","type":"number"},"type":{"description":"The rule type following which the new metric will be generated. 
This is a required field.","title":"type","type":"string"},"unit":{"description":"Unit for the new metric being generated.","title":"unit","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for Resource processor.","properties":{"transforms":{"description":"Transform specifies a list of transforms on metrics with each transform focusing on one metric.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.Transform"},"title":"transforms","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.Operation":{"additionalProperties":false,"description":"Operation defines the specific operation performed on the selected metrics.","properties":{"action":{"description":"Action specifies the action performed for this operation.\nREQUIRED","title":"action","type":"string"},"aggregated_values":{"description":"AggregatedValues is a list of label values to aggregate away.","items":{"type":"string"},"title":"aggregated_values","type":"array"},"aggregation_type":{"description":"AggregationType specifies how to aggregate.","title":"aggregation_type","type":"string"},"experimental_scale":{"description":"Scale is a scalar to multiply the values with.","title":"experimental_scale","type":"number"},"label":{"description":"Label identifies the exact label to operate on.","title":"label","type":"string"},"label_set":{"description":"LabelSet is a list of labels to keep. All other labels are aggregated based on the AggregationType.","items":{"type":"string"},"title":"label_set","type":"array"},"label_value":{"description":"LabelValue identifies the exact label value to operate on","title":"label_value","type":"string"},"new_label":{"description":"NewLabel determines the name to rename the identified label to.","title":"new_label","type":"string"},"new_value":{"description":"NewValue is used to set a new label value either when the operation is `AggregatedValues` or `AddLabel`.","title":"new_value","type":"string"},"value_actions":{"description":"ValueActions is a list of renaming actions for label values.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.ValueAction"},"title":"value_actions","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.Transform":{"additionalProperties":false,"description":"Transform defines the transformation applied to the specific metric","properties":{"action":{"description":"Action specifies the action performed on the matched metric. 
It specifies\nwhether the operations (specified below) are performed on metrics in place (update),\non an inserted clone (insert), or on a new combined metric that includes all\ndata points from the set of matching metrics (combine).\nREQUIRED","title":"action","type":"string"},"aggregation_type":{"description":"AggregationType specifies how to aggregate.\nREQUIRED only if Action is COMBINE.","title":"aggregation_type","type":"string"},"experimental_match_labels":{"description":"MatchLabels specifies the label set against which the metric filter will work.\nThis field is optional.","patternProperties":{".*":{"type":"string"}},"title":"experimental_match_labels","type":"object"},"group_resource_labels":{"description":"GroupResourceLabels specifies resource labels that will be appended to this group's new ResourceMetrics message.\nREQUIRED only if Action is GROUP.","patternProperties":{".*":{"type":"string"}},"title":"group_resource_labels","type":"object"},"include":{"description":"Include specifies the metric(s) to operate on.","title":"include","type":"string"},"match_type":{"description":"MatchType determines how the Include string is matched: \u003cstrict|regexp\u003e.","title":"match_type","type":"string"},"new_name":{"description":"NewName specifies the name of the new metric when inserting or updating.\nREQUIRED only if Action is INSERT.","title":"new_name","type":"string"},"operations":{"description":"Operations contains a list of operations that will be performed on the resulting metric(s).","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.Operation"},"title":"operations","type":"array"},"submatch_case":{"description":"SubmatchCase specifies what case to use for label values created from regexp submatches.","title":"submatch_case","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.ValueAction":{"additionalProperties":false,"description":"ValueAction renames label values.","properties":{"new_value":{"description":"NewValue specifies the label value to rename to.","title":"new_value","type":"string"},"value":{"description":"Value specifies the current label value.","title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.probabilisticsamplerprocessor.Config":{"additionalProperties":false,"description":"Config has the configuration guiding the sampler processor.","properties":{"attribute_source":{"title":"attribute_source","type":"string"},"from_attribute":{"description":"FromAttribute (logs only) is the optional name of a log record attribute used for sampling purposes, such as a\nunique log record ID. The value of the attribute is only used if the trace ID is absent or if `attribute_source` is set to `record`.","title":"from_attribute","type":"string"},"hash_seed":{"description":"HashSeed allows one to configure the hashing seed. This is important in scenarios where multiple layers of collectors\nhave different sampling rates: if they use the same seed, everything passing one layer may pass the other even if they have\ndifferent sampling rates; configuring different seeds avoids that.","title":"hash_seed","type":"integer"},"sampling_percentage":{"description":"SamplingPercentage is the percentage rate at which traces or logs are going to be sampled. 
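For instance, a minimal sketch that samples roughly 15% of traces (assuming the processor is referenced by its `probabilistic_sampler` key):\n\n```yaml\nprocessors:\n  probabilistic_sampler:\n    sampling_percentage: 15\n```\n\n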
Defaults to zero, i.e. no sampling.\nValues greater than or equal to 100 are treated as \"sample all traces/logs\".","title":"sampling_percentage","type":"number"},"sampling_priority":{"description":"SamplingPriority (logs only) allows a log record attribute designated by the `sampling_priority` key\nto be used as the sampling priority of the log record.","title":"sampling_priority","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.redactionprocessor.Config":{"additionalProperties":false,"markdownDescription":"# Redaction processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis processor deletes span attributes that don't match a list of allowed span\nattributes. It also masks span attribute values that match a blocked value\nlist. Span attributes that aren't on the allowed list are removed before any\nvalue checks are done.\n\n## Use Cases\n\nTypical use-cases:\n\n* Prevent sensitive fields from accidentally leaking into traces\n* Ensure compliance with legal, privacy, or security requirements\n\nFor example:\n\n* EU General Data Protection Regulation (GDPR) prohibits the transfer of any\n personal data like birthdates, addresses, or IP addresses across borders\n without explicit consent from the data subject. Popular trace aggregation\n services are located in the US, not in the EU. You can use the redaction processor\n to scrub personal data from your data.\n* PRC legislation prohibits the transfer of geographic coordinates outside of\n the PRC. Popular trace aggregation services are located in the US, not in the\n PRC. You can use the redaction processor to scrub geographic coordinates\n from your data.\n* Payment Card Industry (PCI) Data Security Standards prohibit logging certain\n things or storing them unencrypted. You can use the redaction processor to\n scrub them from your traces.\n\nThe above is written by an engineer, not a lawyer. The redaction processor is\nintended as one line of defence rather than the only compliance measure in\nplace.\n\n## Processor Configuration\n\nPlease refer to [config.go](./config.go) for the config spec.\n\nExamples:\n\n```yaml\nprocessors:\n redaction:\n # allow_all_keys is a flag which, when set to true, disables the\n # allowed_keys list. The list of blocked_values is applied regardless. If\n # you just want to block values, set this to true.\n allow_all_keys: false\n # allowed_keys is a list of span attribute keys that are kept on the span and\n # processed. The list is designed to fail closed. If allowed_keys is empty,\n # no span attributes are allowed and all span attributes are removed. To\n # allow all keys, set allow_all_keys to true.\n allowed_keys:\n - description\n - group\n - id\n - name\n # Ignore the following attributes, allow them to pass without redaction.\n # Any keys in this list are allowed so they don't need to be in both lists.\n ignored_keys:\n - safe_attribute\n # blocked_values is a list of regular expressions for blocking values of\n # allowed span attributes. 
Values that match are masked\n blocked_values:\n - \"4[0-9]{12}(?:[0-9]{3})?\" ## Visa credit card number\n - \"(5[1-5][0-9]{14})\" ## MasterCard number\n # summary controls the verbosity level of the diagnostic attributes that\n # the processor adds to the spans when it redacts or masks other\n # attributes. In some contexts a list of redacted attributes leaks\n # information, while it is valuable when integrating and testing a new\n # configuration. Possible values:\n # - `debug` includes both redacted key counts and names in the summary\n # - `info` includes just the redacted key counts in the summary\n # - `silent` omits the summary attributes\n summary: debug\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for how to fit the configuration\ninto an OpenTelemetry Collector pipeline definition.\n\nIgnored attributes are processed first so they're always allowed and never\nblocked. This field should only be used where you know the data is always\nsafe to send to the telemetry system.\n\nOnly span attributes included on the list of allowed keys list are retained.\nIf `allowed_keys` is empty, then no span attributes are allowed. All span\nattributes are removed in that case. To keep all span attributes, you should\nexplicitly set `allow_all_keys` to true.\n\n`blocked_values` applies to the values of the allowed keys. If the value of an\nallowed key matches the regular expression for a blocked value, the matching\npart of the value is then masked with a fixed length of asterisks.\n\nFor example, if `notes` is on the list of allowed keys, then the `notes` span\nattribute is retained. However, if there is a value such as a credit card\nnumber in the `notes` field that matched a regular expression on the list of\nblocked values, then that value is masked.","properties":{"allow_all_keys":{"description":"AllowAllKeys is a flag to allow all span attribute keys. Setting this\nto true disables the AllowedKeys list. The list of BlockedValues is\napplied regardless. If you just want to block values, set this to true.","title":"allow_all_keys","type":"boolean"},"allowed_keys":{"description":"AllowedKeys is a list of allowed span attribute keys. Span attributes\nnot on the list are removed. The list fails closed if it's empty. To\nallow all keys, you should explicitly set AllowAllKeys","items":{"type":"string"},"title":"allowed_keys","type":"array"},"blocked_values":{"description":"BlockedValues is a list of regular expressions for blocking values of\nallowed span attributes. Values that match are masked","items":{"type":"string"},"title":"blocked_values","type":"array"},"ignored_keys":{"description":"IgnoredKeys is a list of span attribute keys that are not redacted.\nSpan attributes in this list are allowed to pass through the filter\nwithout being changed or removed.","items":{"type":"string"},"title":"ignored_keys","type":"array"},"summary":{"description":"Summary controls the verbosity level of the diagnostic attributes that\nthe processor adds to the spans when it redacts or masks other\nattributes. In some contexts a list of redacted attributes leaks\ninformation, while it is valuable when integrating and testing a new\nconfiguration. 
Possible values are `debug`, `info`, and `silent`.","title":"summary","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.remoteobserverprocessor.Config":{"additionalProperties":false,"markdownDescription":"# Websocket Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs, metrics, traces |\n| Distributions | [] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n\u003c!-- end autogenerated section --\u003e\nThe WebSocket processor, which can be positioned anywhere in a pipeline, allows\ndata to pass through to the next component. Simultaneously, it makes a portion\nof the data accessible to WebSocket clients connecting on a configurable port.\nThis functionality resembles that of the Unix `tee` command, which enables data\nto flow through while duplicating and redirecting it for inspection.\n\nTo avoid overloading clients, the amount of telemetry duplicated over \nany open WebSockets is rate limited by an adjustable amount.\n\n## Config\n\nThe WebSocket processor has two configurable fields: `port` and `limit`:\n\n- `port`: The port on which the WebSocket processor listens. Optional. Defaults\n to `12001`.\n- `limit`: The rate limit over the WebSocket in messages per second. Can be a\n float or an integer. Optional. Defaults to `1`.\n\nExample configuration:\n\n```yaml\nwebsocket:\n port: 12001\n limit: 1 # rate limit 1 msg/sec\n```","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"limit":{"description":"Limit is a float that indicates the maximum number of messages repeated\nthrough the websocket by this processor in messages per second. 
Defaults to 1.","title":"limit","type":"number"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for Resource processor.","markdownDescription":"# Resource Detection Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [contrib], [aws], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe resource detection processor can be used to detect resource information from the host,\nin a format that conforms to the [OpenTelemetry resource semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/), and append or\noverride the resource value in telemetry data with this information.\n\n## Supported detectors\n\n### Environment Variable\n\nReads resource information from the `OTEL_RESOURCE_ATTRIBUTES` environment\nvariable. This is expected to be in the format `\u003ckey1\u003e=\u003cvalue1\u003e,\u003ckey2\u003e=\u003cvalue2\u003e,...`, the\ndetails of which are currently pending confirmation in the OpenTelemetry specification.\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/env:\n detectors: [env]\n timeout: 2s\n override: false\n```\n\n### System metadata\n\nNote: use the Docker detector (see below) if running the Collector as a Docker container.\n\nQueries the host machine to retrieve the following resource attributes:\n\n * host.name\n * host.id\n * os.type\n\nBy default `host.name` is being set to FQDN if possible, and a hostname provided by OS used as fallback.\nThis logic can be changed with `hostname_sources` configuration which is set to `[\"dns\", \"os\"]` by default.\n\nUse the following config to avoid getting FQDN and apply hostname provided by OS only:\n\n```yaml\nprocessors:\n resourcedetection/system:\n detectors: [\"system\"]\n system:\n hostname_sources: [\"os\"]\n```\n\n* all valid options for `hostname_sources`:\n * \"dns\"\n * \"os\"\n * \"cname\"\n * \"lookup\"\n\n#### Hostname Sources\n\n##### dns\n\nThe \"dns\" hostname source uses multiple sources to get the fully qualified domain name. First, it looks up the\nhost name in the local machine's `hosts` file. If that fails, it looks up the CNAME. Lastly, if that fails,\nit does a reverse DNS query. 
Note: this hostname source may produce unreliable results on Windows. To produce\na FQDN, Windows hosts might have better results using the \"lookup\" hostname source, which is mentioned below.\n\n##### os\n\nThe \"os\" hostname source provides the hostname provided by the local machine's kernel.\n\n##### cname\n\nThe \"cname\" hostname source provides the canonical name, as provided by net.LookupCNAME in the Go standard library.\nNote: this hostname source may produce unreliable results on Windows.\n\n##### lookup\n\nThe \"lookup\" hostname source does a reverse DNS lookup of the current host's IP address.\n\n### Docker metadata\n\nQueries the Docker daemon to retrieve the following resource attributes from the host machine:\n\n * host.name\n * os.type\n\nYou need to mount the Docker socket (`/var/run/docker.sock` on Linux) to contact the Docker daemon.\nDocker detection does not work on macOS.\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/docker:\n detectors: [env, docker]\n timeout: 2s\n override: false\n```\n\n### Heroku metadata\n\nWhen [Heroku dyno metadata is active](https://devcenter.heroku.com/articles/dyno-metadata), Heroku applications publish information through environment variables.\n\nWe map these environment variables to resource attributes as follows:\n\n| Dyno metadata environment variable | Resource attribute |\n|------------------------------------|-------------------------------------|\n| `HEROKU_APP_ID` | `heroku.app.id` |\n| `HEROKU_APP_NAME` | `service.name` |\n| `HEROKU_DYNO_ID` | `service.instance.id` |\n| `HEROKU_RELEASE_CREATED_AT` | `heroku.release.creation_timestamp` |\n| `HEROKU_RELEASE_VERSION` | `service.version` |\n| `HEROKU_SLUG_COMMIT` | `heroku.release.commit` |\n\nFor more information, see the [Heroku cloud provider documentation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud_provider/heroku.md) under the [OpenTelemetry specification semantic conventions](https://github.com/open-telemetry/opentelemetry-specification).\n\n```yaml\nprocessors:\n resourcedetection/heroku:\n detectors: [env, heroku]\n timeout: 2s\n override: false\n```\n\n### GCP Metadata\n\nUses the [Google Cloud Client Libraries for Go](https://github.com/googleapis/google-cloud-go)\nto read resource information from the [metadata server](https://cloud.google.com/compute/docs/storing-retrieving-metadata) and environment variables to detect which GCP platform the\napplication is running on, and detect the appropriate attributes for that platform. Regardless\nof the GCP platform the application is running on, use the gcp detector:\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/gcp:\n detectors: [env, gcp]\n timeout: 2s\n override: false\n```\n\n#### GCE Metadata\n\n * cloud.provider (\"gcp\")\n * cloud.platform (\"gcp_compute_engine\")\n * cloud.account.id (project id)\n * cloud.region (e.g. us-central1)\n * cloud.availability_zone (e.g. us-central1-c)\n * host.id (instance id)\n * host.name (instance name)\n * host.type (machine type)\n\n#### GKE Metadata\n\n * cloud.provider (\"gcp\")\n * cloud.platform (\"gcp_kubernetes_engine\")\n * cloud.account.id (project id)\n * cloud.region (only for regional GKE clusters; e.g. \"us-central1\")\n * cloud.availability_zone (only for zonal GKE clusters; e.g. 
\"us-central1-c\")\n * k8s.cluster.name\n * host.id (instance id)\n * host.name (instance name; only when workload identity is disabled)\n\nOne known issue is when GKE workload identity is enabled, the GCE metadata endpoints won't be available, thus the GKE resource detector won't be\nable to determine `host.name`. In that case, users are encouraged to set `host.name` from either:\n- `node.name` through the downward API with the `env` detector\n- obtaining the Kubernetes node name from the Kubernetes API (with `k8s.io/client-go`)\n\n#### Google Cloud Run Metadata\n\n * cloud.provider (\"gcp\")\n * cloud.platform (\"gcp_cloud_run\")\n * cloud.account.id (project id)\n * cloud.region (e.g. \"us-central1\")\n * faas.id (instance id)\n * faas.name (service name)\n * faas.version (service revision)\n\n#### Google Cloud Functions Metadata\n\n * cloud.provider (\"gcp\")\n * cloud.platform (\"gcp_cloud_functions\")\n * cloud.account.id (project id)\n * cloud.region (e.g. \"us-central1\")\n * faas.id (instance id)\n * faas.name (function name)\n * faas.version (function version)\n\n#### Google App Engine Metadata\n\n * cloud.provider (\"gcp\")\n * cloud.platform (\"gcp_app_engine\")\n * cloud.account.id (project id)\n * cloud.region (e.g. \"us-central1\")\n * cloud.availability_zone (e.g. \"us-central1-c\")\n * faas.id (instance id)\n * faas.name (service name)\n * faas.version (service version)\n\n### AWS EC2\n\nUses [AWS SDK for Go](https://docs.aws.amazon.com/sdk-for-go/api/aws/ec2metadata/) to read resource information from the [EC2 instance metadata API](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) to retrieve the following resource attributes:\n\n * cloud.provider (\"aws\")\n * cloud.platform (\"aws_ec2\")\n * cloud.account.id\n * cloud.region\n * cloud.availability_zone\n * host.id\n * host.image.id\n * host.name\n * host.type\n\nIt also can optionally gather tags for the EC2 instance that the collector is running on.\nNote that in order to fetch EC2 tags, the IAM role assigned to the EC2 instance must have a policy that includes the `ec2:DescribeTags` permission.\n\nEC2 custom configuration example:\n```yaml\nprocessors:\n resourcedetection/ec2:\n detectors: [\"ec2\"]\n ec2:\n # A list of regex's to match tag keys to add as resource attributes can be specified\n tags:\n - ^tag1$\n - ^tag2$\n - ^label.*$\n```\n\nIf you are using a proxy server on your EC2 instance, it's important that you exempt requests for instance metadata as [described in the AWS cli user guide](https://github.com/awsdocs/aws-cli-user-guide/blob/a2393582590b64bd2a1d9978af15b350e1f9eb8e/doc_source/cli-configure-proxy.md#using-a-proxy-on-amazon-ec2-instances). Failing to do so can result in proxied or missing instance data.\n\nIf the instance is part of AWS ParallelCluster and the detector is failing to connect to the metadata server, check the iptable and make sure the chain `PARALLELCLUSTER_IMDS` contains a rule that allows OTEL user to access `169.254.169.254/32`\n\n### Amazon ECS\n\nQueries the [Task Metadata Endpoint](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html) (TMDE) to record information about the current ECS Task. 
Only TMDE V4 and V3 are supported.\n\n * cloud.provider (\"aws\")\n * cloud.platform (\"aws_ecs\")\n * cloud.account.id\n * cloud.region\n * cloud.availability_zone\n * aws.ecs.cluster.arn\n * aws.ecs.task.arn\n * aws.ecs.task.family\n * aws.ecs.task.revision\n * aws.ecs.launchtype (V4 only)\n * aws.log.group.names (V4 only)\n * aws.log.group.arns (V4 only)\n * aws.log.stream.names (V4 only)\n * aws.log.stream.arns (V4 only)\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/ecs:\n detectors: [env, ecs]\n timeout: 2s\n override: false\n```\n\n### Amazon Elastic Beanstalk\n\nReads the AWS X-Ray configuration file available on all Beanstalk instances with [X-Ray Enabled](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-configuration-debugging.html).\n\n * cloud.provider (\"aws\")\n * cloud.platform (\"aws_elastic_beanstalk\")\n * deployment.environment\n * service.instance.id\n * service.version\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/elastic_beanstalk:\n detectors: [env, elastic_beanstalk]\n timeout: 2s\n override: false\n```\n\n### Amazon EKS\n\n * cloud.provider (\"aws\")\n * cloud.platform (\"aws_eks\")\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/eks:\n detectors: [env, eks]\n timeout: 2s\n override: false\n```\n\n### AWS Lambda\n\nUses the AWS Lambda [runtime environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime)\nto retrieve the following resource attributes:\n\n[Cloud semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud.md)\n\n* `cloud.provider` (`\"aws\"`)\n* `cloud.platform` (`\"aws_lambda\"`)\n* `cloud.region` (`$AWS_REGION`)\n\n[Function as a Service semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/faas.md)\nand [AWS Lambda semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/instrumentation/aws-lambda.md#resource-detector)\n\n* `faas.name` (`$AWS_LAMBDA_FUNCTION_NAME`)\n* `faas.version` (`$AWS_LAMBDA_FUNCTION_VERSION`)\n* `faas.instance` (`$AWS_LAMBDA_LOG_STREAM_NAME`)\n* `faas.max_memory` (`$AWS_LAMBDA_FUNCTION_MEMORY_SIZE`)\n\n[AWS Logs semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud_provider/aws/logs.md)\n\n* `aws.log.group.names` (`$AWS_LAMBDA_LOG_GROUP_NAME`)\n* `aws.log.stream.names` (`$AWS_LAMBDA_LOG_STREAM_NAME`)\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/lambda:\n detectors: [env, lambda]\n timeout: 0.2s\n override: false\n```\n\n### Azure\n\nQueries the [Azure Instance Metadata Service](https://aka.ms/azureimds) to retrieve the following resource attributes:\n\n * cloud.provider (\"azure\")\n * cloud.platform (\"azure_vm\")\n * cloud.region\n * cloud.account.id (subscription ID)\n * host.id (virtual machine ID)\n * host.name\n * azure.vm.name (same as host.name)\n * azure.vm.size (virtual machine size)\n * azure.vm.scaleset.name (name of the scale set if any)\n * azure.resourcegroup.name (resource group name)\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/azure:\n detectors: [env, azure]\n timeout: 2s\n override: false\n```\n\n### Azure AKS\n\n * cloud.provider (\"azure\")\n * cloud.platform (\"azure_aks\")\n\n```yaml\nprocessors:\n resourcedetection/aks:\n detectors: [env, 
aks]\n timeout: 2s\n override: false\n```\n\n### Consul\n\nQueries a [consul agent](https://www.consul.io/docs/agent) and reads its [configuration endpoint](https://www.consul.io/api-docs/agent#read-configuration) to retrieve the following resource attributes:\n\n * cloud.region (consul datacenter)\n * host.id (consul node id)\n * host.name (consul node name)\n * *exploded consul metadata* - reads all key:value pairs in [consul metadata](https://www.consul.io/docs/agent/options#_node_meta) into label:labelvalue pairs.\n\n```yaml\nprocessors:\n resourcedetection/consul:\n detectors: [env, consul]\n timeout: 2s\n override: false\n```\n\n### Heroku\n\n**You must first enable the [Heroku metadata feature](https://devcenter.heroku.com/articles/dyno-metadata) on the application**\n\nQueries [Heroku metadata](https://devcenter.heroku.com/articles/dyno-metadata) to retrieve the following resource attributes:\n\n* heroku.release.version (identifier for the current release)\n* heroku.release.creation_timestamp (time and date the release was created)\n* heroku.release.commit (commit hash for the current release)\n* heroku.app.name (application name)\n* heroku.app.id (unique identifier for the application)\n* heroku.dyno.id (dyno identifier. Used as host name)\n\n```yaml\nprocessors:\n resourcedetection/heroku:\n detectors: [env, heroku]\n timeout: 2s\n override: false\n```\n\n### Openshift\n\nQueries the OpenShift and Kubernetes API to retrieve the following resource attributes:\n\n * cloud.provider\n * cloud.platform\n * cloud.region\n * k8s.cluster.name\n\nBy default, the API address is determined from the environment variables `KUBERNETES_SERVICE_HOST`, `KUBERNETES_SERVICE_PORT` and the service token is read from `/var/run/secrets/kubernetes.io/serviceaccount/token`.\nIf TLS is not explicitly disabled and no `ca_file` is configured, `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` is used.\nThe determination of the API address, ca_file and the service token is skipped if they are set in the configuration.\n\nExample:\n\n```yaml\nprocessors:\n resourcedetection/openshift:\n detectors: [openshift]\n timeout: 2s\n override: false\n openshift: # optional\n address: \"https://api.example.com\"\n token: \"token\"\n tls:\n insecure: false\n ca_file: \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\"\n```\n\nSee: [TLS Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) for the full set of available options.\n\n## Configuration\n\n```yaml\n# a list of resource detectors to run, valid options are: \"env\", \"system\", \"gce\", \"gke\", \"ec2\", \"ecs\", \"elastic_beanstalk\", \"eks\", \"lambda\", \"azure\", \"heroku\", \"openshift\"\ndetectors: [ \u003cstring\u003e ]\n# determines if existing resource attributes should be overridden or preserved, defaults to true\noverride: \u003cbool\u003e\n# When included, only attributes in the list will be appended. Applies to all detectors.\nattributes: [ \u003cstring\u003e ]\n```\n\n## Ordering\n\nNote that if multiple detectors are inserting the same attribute name, the first detector to insert wins. For example, if you had `detectors: [eks, ec2]` then `cloud.platform` will be `aws_eks` instead of `ec2`. 
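A minimal sketch of that ordering example (detector names taken from the sentence above):\n\n```yaml\nprocessors:\n  resourcedetection:\n    detectors: [eks, ec2]\n    timeout: 2s\n    override: false\n```\n\n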
The below ordering is recommended.\n\n### GCP\n\n* gke\n* gce\n\n### AWS\n\n* lambda\n* elastic_beanstalk\n* eks\n* ecs\n* ec2\n\nThe full list of settings exposed for this processor is documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"aks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.Config","description":"Aks contains user-specified configurations for the aks detector","title":"aks"},"attributes":{"description":"Attributes is an allowlist of attributes to add.\nIf a supplied attribute is not a valid attribute of a supplied detector, it will be ignored.","items":{"type":"string"},"title":"attributes","type":"array"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"azure":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.Config","description":"Azure contains user-specified configurations for the azure detector","title":"azure"},"compression":{"description":"The compression key for supported compression types within the collector.","title":"compression","type":"string"},"consul":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.Config","description":"ConsulConfig contains user-specified configurations for the Consul detector","title":"consul"},"detectors":{"description":"Detectors is an ordered list of named detectors that should be\nrun to attempt to detect resource information.","items":{"type":"string"},"title":"detectors","type":"array"},"docker":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.Config","description":"DockerConfig contains user-specified configurations for the docker detector","title":"docker"},"ec2":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.Config","description":"EC2Config contains user-specified configurations for the EC2 detector","title":"ec2"},"ecs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.Config","description":"ECSConfig contains user-specified configurations for the ECS detector","title":"ecs"},"eks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.Config","description":"EKSConfig contains user-specified configurations for the EKS detector","title":"eks"},"elasticbeanstalk":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.Config","description":"Elasticbeanstalk contains user-specified configurations for the elasticbeanstalk detector","title":"elasticbeanstalk"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"gcp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.Config","description":"GcpConfig contains user-specified 
configurations for the gcp detector","title":"gcp"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"heroku":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.Config","description":"HerokuConfig contains user-specified configurations for the heroku detector","title":"heroku"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"lambda":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.Config","description":"Lambda contains user-specified configurations for the lambda detector","title":"lambda"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"openshift":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.Config","description":"OpenShift contains user-specified configurations for the Openshift detector","title":"openshift"},"override":{"description":"Override indicates whether any existing resource attributes\nshould be overridden or preserved. Defaults to true.","title":"override","type":"boolean"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"system":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.Config","description":"SystemConfig contains user-specified configurations for the System detector","title":"system"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.Config":{"additionalProperties":false,"description":"Config defines user-specified configurations unique to the EC2 detector","properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"tags":{"description":"Tags is a list of regex's to match ec2 instance tag keys that users want\nto add as resource attributes to processed data","items":{"type":"string"},"title":"tags","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/ec2 resource attributes.","properties":{"cloud.account.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"cloud.account.id"},"cloud.availability_zone":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"cloud.availability_zone"},"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"cloud.region"},"host.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"host.id"},"host.image.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"host.image.id"},"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"host.name"},"host.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ec2.internal.metadata.ResourceAttributeConfig","title":"host.type"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.Config":{"additionalPropert
ies":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/ecs resource attributes.","properties":{"aws.ecs.cluster.arn":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.ecs.cluster.arn"},"aws.ecs.launchtype":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.ecs.launchtype"},"aws.ecs.task.arn":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.ecs.task.arn"},"aws.ecs.task.family":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.ecs.task.family"},"aws.ecs.task.revision":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.ecs.task.revision"},"aws.log.group.arns":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.log.group.arns"},"aws.log.group.names":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.log.group.names"},"aws.log.stream.arns":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.log.stream.arns"},"aws.log.stream.names":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"aws.log.stream.names"},"cloud.account.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"cloud.account.id"},"cloud.availability_zone":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"cloud.availability_zone"},"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.inter
nal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.ecs.internal.metadata.ResourceAttributeConfig","title":"cloud.region"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/eks resource attributes.","properties":{"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.eks.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/elastic_beanstalk resource 
attributes.","properties":{"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"deployment.environment":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributeConfig","title":"deployment.environment"},"service.instance.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributeConfig","title":"service.instance.id"},"service.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.elasticbeanstalk.internal.metadata.ResourceAttributeConfig","title":"service.version"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/lambda resource 
attributes.","properties":{"aws.log.group.names":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"aws.log.group.names"},"aws.log.stream.names":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"aws.log.stream.names"},"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"cloud.region"},"faas.instance":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"faas.instance"},"faas.max_memory":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"faas.max_memory"},"faas.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"faas.name"},"faas.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.aws.lambda.internal.metadata.ResourceAttributeConfig","title":"faas.version"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/aks resource 
attributes.","properties":{"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.aks.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/azure resource attributes.","properties":{"azure.resourcegroup.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"azure.resourcegroup.name"},"azure.vm.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"azure.vm.name"},"azure.vm.scaleset.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"azure.vm.scaleset.name"},"azure.vm.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"azure.vm.size"},"cloud.account.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"cloud.account.id"},"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"cloud.region"},"host.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"host.id"},"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.azure.internal.metadata.ResourceAttributeConfig","title":"host.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.Config":{"additionalProperties":false,"description":"The struct requires no user-specified fields by default as consul agent's 
default configuration will be provided to the API client.","properties":{"address":{"description":"Address is the address of the Consul server","title":"address","type":"string"},"datacenter":{"description":"Datacenter to use. If not provided, the default agent datacenter is used.","title":"datacenter","type":"string"},"meta":{"description":"Allowlist of [Consul\nMetadata](https://www.consul.io/docs/agent/options#node_meta) keys to use as\nresource attributes.","patternProperties":{".*":true},"title":"meta","type":"object"},"namespace":{"description":"Namespace is the name of the namespace to send along for the request\nwhen no other Namespace is present in the QueryOptions","title":"namespace","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributesConfig","description":"ResourceAttributes configuration for Consul detector","title":"resource_attributes"},"token":{"description":"Token is used to provide a per-request ACL token\nwhich overrides the agent's default (empty) token.\nToken or Tokenfile are only required if [Consul's ACL\nSystem](https://www.consul.io/docs/security/acl/acl-system) is enabled.","title":"token","type":"string"},"token_file":{"description":"TokenFile is a file containing the current token to use for this client.\nIf provided it is read once at startup and never again.\nToken or Tokenfile are only required if [Consul's ACL\nSystem](https://www.consul.io/docs/security/acl/acl-system) is enabled.","title":"token_file","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/consul resource 
attributes.","properties":{"azure.resourcegroup.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"azure.resourcegroup.name"},"azure.vm.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"azure.vm.name"},"azure.vm.scaleset.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"azure.vm.scaleset.name"},"azure.vm.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"azure.vm.size"},"cloud.account.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"cloud.account.id"},"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"cloud.region"},"host.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"host.id"},"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.consul.internal.metadata.ResourceAttributeConfig","title":"host.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/docker resource 
attributes.","properties":{"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.internal.metadata.ResourceAttributeConfig","title":"host.name"},"os.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.docker.internal.metadata.ResourceAttributeConfig","title":"os.type"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/gcp resource attributes.","properties":{"cloud.account.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"cloud.account.id"},"cloud.availability_zone":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"cloud.availability_zone"},"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"cloud.region"},"faas.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"faas.id"},"faas.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"faas.name"},"faas.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"faas.version"},"host.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"host.id"},"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.Reso
urceAttributeConfig","title":"host.name"},"host.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"host.type"},"k8s.cluster.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.gcp.internal.metadata.ResourceAttributeConfig","title":"k8s.cluster.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.Config":{"additionalProperties":false,"properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/heroku resource attributes.","properties":{"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"heroku.app.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"heroku.app.id"},"heroku.app.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"heroku.app.name"},"heroku.dyno.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"heroku.dyno.id"},"heroku.release.commit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"heroku.release.commit"},"heroku.release.creation_timestamp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"heroku.release.creation_timestamp"},"heroku.release.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"heroku.release.version"},"service.instance.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.heroku.internal.metadata.ResourceAttributeConfig","title":"service.instance.id"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.Config":{"additionalProperties":false,"description":"Config can 
contain user-specified inputs to overwrite default values.","properties":{"address":{"description":"Address is the address of the openshift api server","title":"address","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSettings contains TLS configurations that are specific to client\nconnection used to communicate with the Openshift API.","title":"tls"},"token":{"description":"Token is used to identify against the openshift api server","title":"token","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/openshift resource attributes.","properties":{"cloud.platform":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributeConfig","title":"cloud.platform"},"cloud.provider":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributeConfig","title":"cloud.provider"},"cloud.region":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributeConfig","title":"cloud.region"},"k8s.cluster.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.openshift.internal.metadata.ResourceAttributeConfig","title":"k8s.cluster.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.Config":{"additionalProperties":false,"description":"Config defines user-specified configurations unique to the system detector","properties":{"hostname_sources":{"description":"The HostnameSources is a priority list of sources from which hostname will be fetched.\nIn case of the error in fetching hostname from source,\nthe next source from the list will be considered.(**default**: `[\"dns\", \"os\"]`)","items":{"type":"string"},"title":"hostname_sources","type":"array"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource 
attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for resourcedetectionprocessor/system resource attributes.","properties":{"host.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.internal.metadata.ResourceAttributeConfig","title":"host.id"},"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.internal.metadata.ResourceAttributeConfig","title":"host.name"},"os.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.internal.system.internal.metadata.ResourceAttributeConfig","title":"os.type"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourceprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for Resource processor.","markdownDescription":"# Resource Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| Distributions | [core], [contrib], [aws], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe resource processor can be used to apply changes on resource attributes.\nPlease refer to [config.go](./config.go) for the config spec.\n\n`attributes` represents actions that can be applied on resource attributes.\nSee [Attributes Processor](../attributesprocessor/README.md) for more details on supported attributes actions.\n\nExamples:\n\n```yaml\nprocessors:\n resource:\n attributes:\n - key: cloud.availability_zone\n value: \"zone-1\"\n action: upsert\n - key: k8s.cluster.name\n from_attribute: k8s-cluster\n action: insert\n - key: redundant-attribute\n action: delete\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed\nexamples on using the processor.","properties":{"attributes":{"description":"AttributesActions specifies the list of actions to be applied on resource attributes.\nThe set of actions are {INSERT, UPDATE, UPSERT, DELETE, HASH, EXTRACT}.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.attraction.ActionKeyValue"},"title":"attributes","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.routingprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for the Routing processor.","markdownDescription":"# Routing processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, metrics, logs |\n| 
Distributions | [contrib], [observiq], [redhat], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nRoutes logs, metrics or traces to specific exporters.\n\nThis processor will either read a header from the incoming HTTP request (gRPC or plain HTTP), or it will read a resource attribute, and direct the trace information to specific exporters based on the value read.\n\nThis processor *does not* let traces/metrics/logs to continue through the pipeline and will emit a warning in case other processor(s) are defined after this one.\nSimilarly, exporters defined as part of the pipeline are not authoritative: if you add an exporter to the pipeline, make sure you add it to this processor *as well*, otherwise it won't be used at all.\nAll exporters defined as part of this processor *must also* be defined as part of the pipeline's exporters.\n\nGiven that this processor depends on information provided by the client via HTTP headers or resource attributes, caution must be taken when processors that aggregate data like `batch` or `groupbytrace` are used as part of the pipeline.\n\n## Configuration\n\nThe following settings are required:\n\n- `from_attribute`: contains the HTTP header name or the resource attribute name to look up the route's value. Only the OTLP exporter has been tested in connection with the OTLP gRPC Receiver, but any other gRPC receiver should work fine, as long as the client sends the specified HTTP header.\n- `table`: the routing table for this processor.\n- `table.value`: a possible value for the attribute specified under FromAttribute.\n- `table.exporters`: the list of exporters to use when the value from the FromAttribute field matches this table item.\n\nThe following settings can be optionally configured:\n\n- `attribute_source` defines where to look for the attribute in `from_attribute`. The allowed values are:\n - `context` (the default) - to search the [context][context_docs], which includes HTTP headers\n - `resource` - to search the resource attributes.\n- `drop_resource_routing_attribute` - controls whether to remove the resource attribute used for routing. 
This is only relevant if AttributeSource is set to resource.\n- `default_exporters` contains the list of exporters to use when a more specific record can't be found in the routing table.\n\nExample:\n\n```yaml\nprocessors:\n routing:\n from_attribute: X-Tenant\n default_exporters:\n - jaeger\n table:\n - value: acme\n exporters: [jaeger/acme]\nexporters:\n jaeger:\n endpoint: localhost:14250\n jaeger/acme:\n endpoint: localhost:24250\n```\n\n### Tech Preview: OpenTelemetry Transformation Language statements as routing conditions\n\nAlternatively, it is possible to use subset of the [OpenTelemetry Transformation Language (OTTL)](../../pkg/ottl/README.md) statements as routing conditions.\n\nTo configure the routing processor with [OTTL] routing conditions use the following options:\n\n- `table (required)`: the routing table for this processor.\n- `table.statement (required)`: the routing condition provided as the [OTTL] statement.\n- `table.exporters (required)`: the list of exporters to use when the routing condition is met.\n- `default_exporters (optional)`: contains the list of exporters to use when a record does not meet any of specified conditions.\n- `error_mode (optional)`: determines how errors returned from OTTL statements are handled. Valid values are `ignore` and `propagate`. If `ignored` is used and a statement's condition has an error then the payload will be routed to the default exporter. If not supplied, `propagate` is used.\n\n\n```yaml\n\nprocessors:\n routing:\n default_exporters:\n - jaeger\n error_mode: ignore\n table:\n - statement: route() where resource.attributes[\"X-Tenant\"] == \"acme\"\n exporters: [jaeger/acme]\n - statement: delete_key(resource.attributes, \"X-Tenant\") where IsMatch(resource.attributes[\"X-Tenant\"], \".*corp\")\n exporters: [jaeger/ecorp]\n\nexporters:\n jaeger:\n endpoint: localhost:14250\n jaeger/acme:\n endpoint: localhost:24250\n jaeger/ecorp:\n endpoint: localhost:34250\n```\n\nA signal may get matched by routing conditions of more than one routing table entry. In this case, the signal will be routed to all exporters of matching routes.\nRespectively, if none of the routing conditions met, then a signal is routed to default exporters.\n\nIt is also possible to mix both the conventional routing configuration and the routing configuration with [OTTL] conditions.\n\n#### Limitations:\n\n- [OTTL] statements can be applied only to resource attributes.\n- Currently, it is not possible to specify the boolean statements without function invocation as the routing condition. 
It is required to provide the NOOP `route()` or any other supported function as part of the routing statement, see [#13545](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/13545) for more information.\n- Supported [OTTL] functions:\n - [IsMatch](../../pkg/ottl/ottlfuncs/README.md#IsMatch)\n - [delete_key](../../pkg/ottl/ottlfuncs/README.md#delete_key)\n - [delete_matching_keys](../../pkg/ottl/ottlfuncs/README.md#delete_matching_keys)\n\nThe full list of settings exposed for this processor are documented [here](./config.go) with detailed sample configuration files:\n\n- [logs](./testdata/config_logs.yaml)\n- [metrics](./testdata/config_metrics.yaml)\n- [traces](./testdata/config_traces.yaml)\n\n[context_docs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/README.md\n[OTTL]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md#telemetry-query-language","properties":{"attribute_source":{"description":"AttributeSource defines where the attribute defined in `from_attribute` is searched for.\nThe allowed values are:\n- \"context\" - the attribute must exist in the incoming context\n- \"resource\" - the attribute must exist in resource attributes\nThe default value is \"context\".\nOptional.","title":"attribute_source","type":"string"},"default_exporters":{"description":"DefaultExporters contains the list of exporters to use when a more specific record can't be found in the routing table.\nOptional.","items":{"type":"string"},"title":"default_exporters","type":"array"},"drop_resource_routing_attribute":{"description":"DropRoutingResourceAttribute controls whether to remove the resource attribute used for routing.\nThis is only relevant if AttributeSource is set to resource.\nOptional.","title":"drop_resource_routing_attribute","type":"boolean"},"error_mode":{"description":"ErrorMode determines how the processor reacts to errors that occur while processing an OTTL condition.\nValid values are `ignore` and `propagate`.\n`ignore` means the processor ignores errors returned by conditions and continues on to the next condition. This is the recommended mode.\nIf `ignored` is used and a statement's condition has an error then the payload will be routed to the default exporter.\n`propagate` means the processor returns the error up the pipeline. This will result in the payload being dropped from the collector.\nThe default value is `propagate`.","title":"error_mode","type":"string"},"from_attribute":{"description":"FromAttribute contains the attribute name to look up the route value. This attribute should be part of the context propagated\ndown from the previous receivers and/or processors. If all the receivers and processors are propagating the entire context correctly,\nthis could be the HTTP/gRPC header from the original request/RPC. 
Typically, aggregation processors (batch, groupbytrace)\nwill create a new context, so, those should be avoided when using this processor.Although the HTTP spec allows headers to be repeated,\nthis processor will only use the first value.\nRequired.","title":"from_attribute","type":"string"},"table":{"description":"Table contains the routing table for this processor.\nRequired.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.routingprocessor.RoutingTableItem"},"title":"table","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.routingprocessor.RoutingTableItem":{"additionalProperties":false,"description":"RoutingTableItem specifies how data should be routed to the different exporters","properties":{"exporters":{"description":"Exporters contains the list of exporters to use when the value from the FromAttribute field matches this table item.\nWhen no exporters are specified, the ones specified under DefaultExporters are used, if any.\nThe routing processor will fail upon the first failure from these exporters.\nOptional.","items":{"type":"string"},"title":"exporters","type":"array"},"statement":{"description":"Statement is a OTTL statement used for making a routing decision.\nRequired when 'Value' isn't provided.","title":"statement","type":"string"},"value":{"description":"Value represents a possible value for the field specified under FromAttribute.\nRequired when Statement isn't provided.","title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.schemaprocessor.Config":{"additionalProperties":false,"description":"Config defines the user provided values for the Schema Processor","markdownDescription":"# Schema Transformer Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: traces, metrics, logs |\n| Distributions | [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe _Schema Processor_ is used to convert existing telemetry data or signals to a version of the semantic convention defined as part of the configuration.\nThe processor works by using a set of target schema URLs that are used to match incoming signals.\nOn a match, the processor will fetch the schema translation file (if not cached) set by the incoming signal and apply the transformations\nrequired to export as the target semantic convention version.\n\nFurthermore, it is also possible for organisations and vendors to publish their own semantic conventions and be used by this processor, \nbe sure to follow [schema overview](https://opentelemetry.io/docs/reference/specification/schemas/overview/) for all the details.\n\n## Caching Schema Translation Files\n\nIn order to improve efficiency of the processor, the `prefetch` option allows the processor to start downloading and preparing\nthe translations needed for signals that match the schema URL.\n\n## Schema Formats\n\nA schema URl is made up in two parts, _Schema Family_ and _Schema Version_, the schema URL is broken down like so:\n\n```text\n| Schema URL |\n| https://example.com/telemetry/schemas/ | | 1.0.1 |\n| Schema Family | | Schema Version |\n```\n\nThe final path in the schema URL _MUST_ be the schema version and the preceding portion of the URL is the _Schema Family_.\nTo 
read about schema formats, please read more [here](https://opentelemetry.io/docs/reference/specification/schemas/overview/#schema-url)\n\n## Targets Schemas\n\nTargets define a set of schema URLs with a schema identifier that will be used to translate any schema URL that matches the target URL to that version.\nIn the event that the processor matches a signal to a target, the processor will translate the signal from the published one to the defined identifier;\nfor example using the configuration below, a signal published with the `https://opentelemetry.io/schemas/1.8.0` schema will be translated \nby the collector to the `https//opentelemetry.io/schemas/1.6.1` schema.\nWithin the schema targets, no duplicate schema families are allowed and will report an error if detected.\n\n\n# Example\n\n```yaml\nprocessors:\n schema:\n prefetch:\n - https://opentelemetry.io/schemas/1.9.0\n targets:\n - https://opentelemetry.io/schemas/1.6.1\n - http://example.com/telemetry/schemas/1.0.1\n```\n\nFor more complete examples, please refer to [config.yml](./testdata/config.yml).","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"prefetch":{"description":"PreCache is a list of schema URLs that are downloaded\nand cached at the start of the collector runtime\nin order to avoid fetching data that later on could\nblock processing of signals. (Optional field)","items":{"type":"string"},"title":"prefetch","type":"array"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"targets":{"description":"Targets define what schema families should be\ntranslated to, allowing older and newer formats\nto conform to the target schema identifier.","items":{"type":"string"},"title":"targets","type":"array"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.servicegraphprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration options for servicegraphprocessor.","markdownDescription":"# Service graph processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe service graphs processor is a traces processor that builds a map representing the interrelationships between various services in a system.\nThe processor will analyse trace data and generate metrics describing the relationship between the services.\nThese metrics can be used by data visualization apps (e.g. Grafana) to draw a service graph.\n\nService graphs are useful for a number of use-cases:\n\n* Infer the topology of a distributed system. As distributed systems grow, they become more complex. 
Service graphs can help you understand the structure of the system.\n* Provide a high level overview of the health of your system.\nService graphs show error rates, latencies, among other relevant data.\n* Provide an historic view of a system’s topology.\nDistributed systems change very frequently,\nand service graphs offer a way of seeing how these systems have evolved over time.\n\nThis component is based on [Grafana Tempo's service graph processor](https://github.com/grafana/tempo/tree/main/modules/generator/processor/servicegraphs).\n\n## How it works\n\nService graphs work by inspecting traces and looking for spans with parent-children relationship that represent a request.\nThe processor uses the OpenTelemetry semantic conventions to detect a myriad of requests.\nIt currently supports the following requests:\n\n* A direct request between two services where the outgoing and the incoming span must have `span.kind` client and server respectively.\n* A request across a messaging system where the outgoing and the incoming span must have `span.kind` producer and consumer respectively.\n* A database request; in this case the processor looks for spans containing attributes `span.kind`=client as well as db.name.\n\nEvery span that can be paired up to form a request is kept in an in-memory store,\nuntil its corresponding pair span is received or the maximum waiting time has passed.\nWhen either of these conditions are reached, the request is recorded and removed from the local store.\n\nEach emitted metrics series have the client and server label corresponding with the service doing the request and the service receiving the request.\n\n```\ntraces_service_graph_request_total{client=\"app\", server=\"db\", connection_type=\"database\"} 20\n```\n\nTLDR: The processor will try to find spans belonging to requests as seen from the client and the server and will create a metric representing an edge in the graph.\n\n## Metrics\n\nThe following metrics are emitted by the processor:\n\n| Metric | Type | Labels | Description |\n|---------------------------------------------|-----------|---------------------------------|--------------------------------------------------------------|\n| traces_service_graph_request_total | Counter | client, server, connection_type | Total count of requests between two nodes |\n| traces_service_graph_request_failed_total | Counter | client, server, connection_type | Total count of failed requests between two nodes |\n| traces_service_graph_request_server_seconds | Histogram | client, server, connection_type | Time for a request between two nodes as seen from the server |\n| traces_service_graph_request_client_seconds | Histogram | client, server, connection_type | Time for a request between two nodes as seen from the client |\n| traces_service_graph_unpaired_spans_total | Counter | client, server, connection_type | Total count of unpaired spans |\n| traces_service_graph_dropped_spans_total | Counter | client, server, connection_type | Total count of dropped spans |\n\nDuration is measured both from the client and the server sides.\n\nPossible values for `connection_type`: unset, `messaging_system`, or `database`.\n\nAdditional labels can be included using the `dimensions` configuration option. 
Those labels will have a prefix to mark where they originate (client or server span kinds).\nThe `client_` prefix relates to the dimensions coming from spans with `SPAN_KIND_CLIENT`, and the `server_` prefix relates to the\ndimensions coming from spans with `SPAN_KIND_SERVER`.\n\nSince the service graph processor has to process both sides of an edge,\nit needs to process all spans of a trace to function properly.\nIf spans of a trace are spread out over multiple instances, spans are not paired up reliably.\nA possible solution to this problem is using the [load balancing exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/loadbalancingexporter)\nin a layer on front of collector instances running this processor.\n\n## Visualization\n\nService graph metrics are natively supported by Grafana since v9.0.4.\nTo run it, configure a Tempo data source's 'Service Graphs' by linking to the Prometheus backend where metrics are being sent:\n\n```yaml\napiVersion: 1\ndatasources:\n # Prometheus backend where metrics are sent\n - name: Prometheus\n type: prometheus\n uid: prometheus\n url: \u003cprometheus-url\u003e\n jsonData:\n httpMethod: GET\n version: 1\n - name: Tempo\n type: tempo\n uid: tempo\n url: \u003ctempo-url\u003e\n jsonData:\n httpMethod: GET\n serviceMap:\n datasourceUid: 'prometheus'\n version: 1\n```\n\n## Configuration\n\nThe following settings are required:\n\n- `metrics_exporter`: the name of the exporter that this processor will write metrics to. This exporter **must** be present in a pipeline.\n- `latency_histogram_buckets`: the list of durations defining the latency histogram buckets.\n - Default: `[2ms, 4ms, 6ms, 8ms, 10ms, 50ms, 100ms, 200ms, 400ms, 800ms, 1s, 1400ms, 2s, 5s, 10s, 15s]`\n- `dimensions`: the list of dimensions to add together with the default dimensions defined above.\n\nThe following settings can be optionally configured:\n\n- `store` defines the config for the in-memory store used to find requests between services by pairing spans.\n - `ttl` - TTL is the time to live for items in the store.\n - Default: `2ms`\n - `max_items` - MaxItems is the maximum number of items to keep in the store.\n - Default: `1000` \n- `cache_loop` - the time to cleans the cache periodically\n- `store_expiration_loop` the time to expire old entries from the store periodically.\n- `virtual_node_peer_attributes` the list of attributes need to match for building virtual server node, the higher the front, the higher the priority.\n - Default: `[db.name, net.sock.peer.addr, net.peer.name, rpc.service, net.sock.peer.name, net.peer.name, http.url, http.target]`\n\n## Example configuration\n\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n otlp/servicegraph: # Dummy receiver for the metrics pipeline\n protocols:\n grpc:\n endpoint: localhost:12345\n\nprocessors:\n servicegraph:\n metrics_exporter: prometheus/servicegraph # Exporter to send metrics to\n latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 100ms, 250ms] # Buckets for latency histogram\n dimensions: [cluster, namespace] # Additional dimensions (labels) to be added to the metrics extracted from the resource and span attributes\n store: # Configuration for the in-memory store\n ttl: 2s # Value to wait for an edge to be completed\n max_items: 200 # Amount of edges that will be stored in the storeMap \n cache_loop: 2m # the time to cleans the cache periodically\n store_expiration_loop: 10s # the time to expire old entries from the store periodically.\n virtual_node_peer_attributes:\n 
- db.name\n - rpc.service\nexporters:\n prometheus/servicegraph:\n endpoint: localhost:9090\n namespace: servicegraph\n otlp:\n endpoint: localhost:4317\n\nservice:\n pipelines:\n traces:\n receivers: [otlp]\n processors: [servicegraph]\n exporters: [otlp]\n metrics/servicegraph:\n receivers: [otlp/servicegraph]\n processors: []\n exporters: [prometheus/servicegraph]\n```\n\n## Features and Feature-Gates\n\nSee the [Collector feature gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md#collector-feature-gates) for an overview of feature gates in the collector.","properties":{"cache_loop":{"description":"CacheLoop is the time to cleans the cache periodically.","title":"cache_loop","type":"string"},"dimensions":{"description":"Dimensions defines the list of additional dimensions on top of the provided:\n- client\n- server\n- failed\n- connection_type\nThe dimensions will be fetched from the span's attributes. Examples of some conventionally used attributes:\nhttps://github.com/open-telemetry/opentelemetry-collector/blob/main/model/semconv/opentelemetry.go.","items":{"type":"string"},"title":"dimensions","type":"array"},"latency_histogram_buckets":{"description":"LatencyHistogramBuckets is the list of durations representing latency histogram buckets.\nSee defaultLatencyHistogramBucketsMs in processor.go for the default value.","items":{"type":"string"},"title":"latency_histogram_buckets","type":"array"},"metrics_exporter":{"description":"MetricsExporter is the name of the metrics exporter to use to ship metrics.","title":"metrics_exporter","type":"string"},"store":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.servicegraphprocessor.StoreConfig","description":"Store contains the config for the in-memory store used to find requests between services by pairing spans.","title":"store"},"store_expiration_loop":{"description":"CacheLoop is the time to expire old entries from the store periodically.","title":"store_expiration_loop","type":"string"},"virtual_node_peer_attributes":{"description":"VirtualNodePeerAttributes the list of attributes need to match, the higher the front, the higher the priority.","items":{"type":"string"},"title":"virtual_node_peer_attributes","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.servicegraphprocessor.StoreConfig":{"additionalProperties":false,"properties":{"max_items":{"description":"MaxItems is the maximum number of items to keep in the store.","title":"max_items","type":"integer"},"ttl":{"description":"TTL is the time to live for items in the store.","title":"ttl","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanmetricsprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration options for spanmetricsprocessor.","properties":{"aggregation_temporality":{"title":"aggregation_temporality","type":"string"},"dimensions":{"description":"Dimensions defines the list of additional dimensions on top of the provided:\n- service.name\n- operation\n- span.kind\n- status.code\nThe dimensions will be fetched from the span's attributes. 
Examples of some conventionally used attributes:\nhttps://github.com/open-telemetry/opentelemetry-collector/blob/main/model/semconv/opentelemetry.go.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanmetricsprocessor.Dimension"},"title":"dimensions","type":"array"},"dimensions_cache_size":{"description":"DimensionsCacheSize defines the size of cache for storing Dimensions, which helps to avoid cache memory growing\nindefinitely over the lifetime of the collector.\nOptional. See defaultDimensionsCacheSize in processor.go for the default value.","title":"dimensions_cache_size","type":"integer"},"latency_histogram_buckets":{"description":"LatencyHistogramBuckets is the list of durations representing latency histogram buckets.\nSee defaultLatencyHistogramBucketsMs in processor.go for the default value.","items":{"type":"string"},"title":"latency_histogram_buckets","type":"array"},"metrics_exporter":{"description":"MetricsExporter is the name of the metrics exporter to use to ship metrics.","title":"metrics_exporter","type":"string"},"metrics_flush_interval":{"description":"MetricsEmitInterval is the time period between when metrics are flushed or emitted to the configured MetricsExporter.","title":"metrics_flush_interval","type":"string"},"namespace":{"description":"Namespace is the namespace to use for the metrics.","title":"namespace","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanmetricsprocessor.Dimension":{"additionalProperties":false,"description":"Dimension defines the dimension name and optional default value if the Dimension is missing from a span attribute.","markdownDescription":"# Span Metrics Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [deprecated]: traces |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[deprecated]: https://github.com/open-telemetry/opentelemetry-collector#deprecated\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n**Note**: The `spanmetrics` processor is **deprecated** in favour of the [spanmetrics](../../connector/spanmetricsconnector/README.md) connector.\n\n**Note:** Currently experimental and subject to breaking changes (e.g. 
change from processor to exporter/translator component).\nSee: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/403.\n\nAggregates Request, Error and Duration (R.E.D) metrics from span data.\n\n**Request** counts are computed as the number of spans seen per unique set of dimensions, including Errors.\nFor example, the following metric shows 142 calls:\n```\ncalls_total{http_method=\"GET\",http_status_code=\"200\",operation=\"/Address\",service_name=\"shippingservice\",span_kind=\"SPAN_KIND_SERVER\",status_code=\"STATUS_CODE_UNSET\"} 142\n```\nMultiple metrics can be aggregated if, for instance, a user wishes to view call counts just on `service_name` and `operation`.\n\n**Error** counts are computed from the Request counts which have an \"Error\" Status Code metric dimension.\nFor example, the following metric indicates 220 errors:\n```\ncalls_total{http_method=\"GET\",http_status_code=\"503\",operation=\"/checkout\",service_name=\"frontend\",span_kind=\"SPAN_KIND_CLIENT\",status_code=\"STATUS_CODE_ERROR\"} 220\n```\n\n**Duration** is computed from the difference between the span start and end times and inserted into the\nrelevant latency histogram time bucket for each unique set dimensions.\nFor example, the following latency buckets indicate the vast majority of spans (9K) have a 100ms latency:\n```\nlatency_bucket{http_method=\"GET\",http_status_code=\"200\",label1=\"value1\",operation=\"/Address\",service_name=\"shippingservice\",span_kind=\"SPAN_KIND_SERVER\",status_code=\"STATUS_CODE_UNSET\",le=\"2\"} 327\nlatency_bucket{http_method=\"GET\",http_status_code=\"200\",label1=\"value1\",operation=\"/Address\",service_name=\"shippingservice\",span_kind=\"SPAN_KIND_SERVER\",status_code=\"STATUS_CODE_UNSET\",le=\"6\"} 751\nlatency_bucket{http_method=\"GET\",http_status_code=\"200\",label1=\"value1\",operation=\"/Address\",service_name=\"shippingservice\",span_kind=\"SPAN_KIND_SERVER\",status_code=\"STATUS_CODE_UNSET\",le=\"10\"} 1195\nlatency_bucket{http_method=\"GET\",http_status_code=\"200\",label1=\"value1\",operation=\"/Address\",service_name=\"shippingservice\",span_kind=\"SPAN_KIND_SERVER\",status_code=\"STATUS_CODE_UNSET\",le=\"100\"} 10180\nlatency_bucket{http_method=\"GET\",http_status_code=\"200\",label1=\"value1\",operation=\"/Address\",service_name=\"shippingservice\",span_kind=\"SPAN_KIND_SERVER\",status_code=\"STATUS_CODE_UNSET\",le=\"250\"} 10180\n...\n```\n\nEach metric will have _at least_ the following dimensions because they are common across all spans:\n- Service name\n- Operation\n- Span kind\n- Status code\n\nThis processor lets traces to continue through the pipeline unmodified.\n\nThe following settings are required:\n\n- `metrics_exporter`: the name of the exporter that this processor will write metrics to. 
This exporter **must** be present in a pipeline.\n\nThe following settings can be optionally configured:\n\n- `latency_histogram_buckets`: the list of durations defining the latency histogram buckets.\n - Default: `[2ms, 4ms, 6ms, 8ms, 10ms, 50ms, 100ms, 200ms, 400ms, 800ms, 1s, 1400ms, 2s, 5s, 10s, 15s]`\n- `dimensions`: the list of dimensions to add together with the default dimensions defined above.\n \n Each additional dimension is defined with a `name` which is looked up in the span's collection of attributes or\n resource attributes (AKA process tags) such as `ip`, `host.name` or `region`.\n \n If the `name`d attribute is missing in the span, the optional provided `default` is used.\n \n If no `default` is provided, this dimension will be **omitted** from the metric.\n- `dimensions_cache_size`: the size of the cache for storing Dimensions, which keeps the collector's memory usage bounded.\n - Default: `1000`.\n- `aggregation_temporality`: Defines the aggregation temporality of the generated metrics. \n One of either `AGGREGATION_TEMPORALITY_CUMULATIVE` or `AGGREGATION_TEMPORALITY_DELTA`.\n - Default: `AGGREGATION_TEMPORALITY_CUMULATIVE`\n- `namespace`: Defines the namespace of the generated metrics. If `namespace` is provided, the generated metric names are prefixed with `namespace.`.\n- `metrics_flush_interval`: Defines the flush interval of the generated metrics.\n - Default: `15s`.\n\n## Examples\n\nThe following is a simple example usage of the spanmetrics processor.\n\nFor configuration examples on other use cases, please refer to [More Examples](#more-examples).\n\nThe full list of settings exposed for this processor is documented [here](./config.go).\n\n```yaml\nreceivers:\n jaeger:\n protocols:\n thrift_http:\n endpoint: \"0.0.0.0:14278\"\n\n # Dummy receiver that's never used, because a pipeline is required to have one.\n otlp/spanmetrics:\n protocols:\n grpc:\n endpoint: \"localhost:12345\"\n\n otlp:\n protocols:\n grpc:\n endpoint: \"localhost:55677\"\n\nprocessors:\n batch:\n spanmetrics:\n metrics_exporter: otlp/spanmetrics\n latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 100ms, 250ms]\n dimensions:\n - name: http.method\n default: GET\n - name: http.status_code\n dimensions_cache_size: 1000\n aggregation_temporality: \"AGGREGATION_TEMPORALITY_CUMULATIVE\" \n metrics_flush_interval: 15s\n\nexporters:\n jaeger:\n endpoint: localhost:14250\n\n otlp/spanmetrics:\n endpoint: \"localhost:55677\"\n tls:\n insecure: true\n\n prometheus:\n endpoint: \"0.0.0.0:8889\"\n\nservice:\n pipelines:\n traces:\n receivers: [jaeger]\n processors: [spanmetrics, batch]\n exporters: [jaeger]\n\n # The exporter name must match the metrics_exporter name.\n # The receiver is just a dummy and never used; added to pass validation requiring at least one receiver in a pipeline.\n metrics/spanmetrics:\n receivers: [otlp/spanmetrics]\n exporters: [otlp/spanmetrics]\n\n metrics:\n receivers: [otlp]\n exporters: [prometheus]\n```\n\n### More Examples\n\nFor more example configuration covering various other use cases, please visit the [testdata directory](./testdata).","properties":{"default":{"title":"default","type":"string"},"name":{"title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.Config":{"additionalProperties":false,"description":"Config is the configuration for the span processor.","markdownDescription":"# Span Processor\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | 
[alpha]: traces |\n| Distributions | [core], [contrib], [aws], [observiq], [redhat], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe span processor modifies the span name based on its attributes or extracts span attributes from the span name. It also allows\nchanging the span status. Please refer to [config.go](./config.go) for the config spec.\n\nIt optionally supports the ability to [include/exclude spans](../attributesprocessor/README.md#includeexclude-filtering).\n\nThe following actions are supported:\n\n- `name`: Modify the span name based on attributes, or extract attributes from the span name\n- `status`: Modify the status of the span\n\n### Name a span\n\nThe following settings are required:\n\n- `from_attributes`: The attribute values for the keys are used to create a\nnew name in the order specified in the configuration.\n\nThe following settings can be optionally configured:\n\n- `separator`: A string which, if specified, will be used to separate the attribute values in the new span name\n\nNote: If renaming is dependent on attributes being modified by the `attributes`\nprocessor, ensure the `span` processor is specified after the `attributes`\nprocessor in the `pipeline` specification.\n\n```yaml\nspan:\n name:\n # from_attributes represents the attribute keys to pull the values from to generate the\n # new span name.\n from_attributes: [\u003ckey1\u003e, \u003ckey2\u003e, ...]\n # Separator is the string used to concatenate various parts of the span name.\n separator: \u003cvalue\u003e\n```\n\nExample:\n\n```yaml\nspan:\n name:\n from_attributes: [\"db.svc\", \"operation\"]\n separator: \"::\"\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed\nexamples on using the processor.\n\n### Extract attributes from span name\n\nTakes a list of regular expressions to match span name against and extract\nattributes from it based on subexpressions. Must be specified under the\n`to_attributes` section.\n\nThe following settings are required:\n\n- `rules`: A list of rules to extract attribute values from span name. The values\nin the span name are replaced by extracted attribute names. Each rule in the list\nis a regex pattern string. Span name is checked against the regex and if the regex\nmatches then all named subexpressions of the regex are extracted as attributes\nand are added to the span. Each subexpression name becomes an attribute name and\nsubexpression matched portion becomes the attribute value. The matched portion\nin the span name is replaced by extracted attribute name. If the attributes\nalready exist in the span then they will be overwritten. The process is repeated\nfor all rules in the order they are specified. Each subsequent rule works on the\nspan name that is the output after processing the previous rule.\n- `break_after_match` (default = false): specifies if processing of rules should stop after the first\nmatch. 
If it is false rule processing will continue to be performed over the\nmodified span name.\n\n```yaml\nspan/to_attributes:\n name:\n to_attributes:\n rules:\n - regexp-rule1\n - regexp-rule2\n - regexp-rule3\n ...\n break_after_match: \u003ctrue|false\u003e\n\n```\n\nExample:\n\n```yaml\n# Let's assume input span name is /api/v1/document/12345678/update\n# Applying the following results in output span name /api/v1/document/{documentId}/update\n# and will add a new attribute \"documentId\"=\"12345678\" to the span.\nspan/to_attributes:\n name:\n to_attributes:\n rules:\n - ^\\/api\\/v1\\/document\\/(?P\u003cdocumentId\u003e.*)\\/update$\n```\n\n### Set status for span\n\nThe following setting is required:\n\n- `code`: Represents span status. One of the following values: \"Unset\", \"Error\", \"Ok\".\n\nThe following setting is allowed only for code \"Error\":\n- `description`\n\nExample:\n\n```yaml\n# Set status allows setting a specific status for a given span. Possible values are\n# Ok, Error and Unset as per\n# https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status\n# The description field allows setting a human-readable message for errors.\nspan/set_status:\n status:\n code: Error\n description: \"some error description\"\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed\nexamples on using the processor.","properties":{"exclude":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties","description":"Exclude specifies when this processor will not be applied to the input data\nwhich match the specified properties.\nNote: The `exclude` properties are checked after the `include` properties,\nif they exist.\nIf `include` isn't specified, the `exclude` properties are checked against\nall input data.\nThis is an optional field. If neither `include` nor `exclude` is set, all input data\nis processed. If `exclude` is set and `include` isn't set, then all the\ninput data that does not match the properties in this structure are processed.","title":"exclude"},"include":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.filter.filterconfig.MatchProperties","description":"Include specifies the set of input data properties that must be present in order\nfor this processor to apply to it.\nNote: If `exclude` is specified, the input data is compared against those\nproperties after the `include` properties.\nThis is an optional field. If neither `include` nor `exclude` is set, all input data\nare processed. 
If `include` is set and `exclude` isn't set, then all\ninput data matching the properties in this structure are processed.","title":"include"},"name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.Name","description":"Rename specifies the components required to re-name a span.\nThe `from_attributes` field needs to be set for this processor to be properly\nconfigured.\nNote: The field name is `Rename` to avoid collision with the Name() method\nfrom config.NamedEntity","title":"name"},"status":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.Status","description":"SetStatus specifies status which should be set for this span.","title":"status"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.Name":{"additionalProperties":false,"description":"Name specifies the attributes to use to re-name a span.","properties":{"from_attributes":{"description":"FromAttributes represents the attribute keys to pull the values from to\ngenerate the new span name. All attribute keys are required in the span\nto re-name a span. If any attribute is missing from the span, no re-name\nwill occur.\nNote: The new span name is constructed in order of the `from_attributes`\nspecified in the configuration. This field is required and cannot be empty.","items":{"type":"string"},"title":"from_attributes","type":"array"},"separator":{"description":"Separator is the string used to separate attributes values in the new\nspan name. If no value is set, no separator is used between attribute\nvalues. Used with FromAttributes only.","title":"separator","type":"string"},"to_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.ToAttributes","description":"ToAttributes specifies a configuration to extract attributes from span name.","title":"to_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.Status":{"additionalProperties":false,"properties":{"code":{"description":"Code is one of three values \"Ok\" or \"Error\" or \"Unset\". Please check:\nhttps://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status","title":"code","type":"string"},"description":{"description":"Description is an optional field documenting Error statuses.","title":"description","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.ToAttributes":{"additionalProperties":false,"description":"ToAttributes specifies a configuration to extract attributes from span name.","properties":{"break_after_match":{"description":"BreakAfterMatch specifies if processing of rules should stop after the first\nmatch. If it is false rule processing will continue to be performed over the\nmodified span name.","title":"break_after_match","type":"boolean"},"rules":{"description":"Rules is a list of rules to extract attribute values from span name. The values\nin the span name are replaced by extracted attribute names. Each rule in the list\nis a regex pattern string. Span name is checked against the regex. If it matches\nthen all named subexpressions of the regex are extracted as attributes\nand are added to the span. Each subexpression name becomes an attribute name and\nsubexpression matched portion becomes the attribute value. The matched portion\nin the span name is replaced by extracted attribute name. 
If the attributes\nalready exist in the span then they will be overwritten. The process is repeated\nfor all rules in the order they are specified. Each subsequent rule works on the\nspan name that is the output after processing the previous rule.","items":{"type":"string"},"title":"rules","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.AndCfg":{"additionalProperties":false,"properties":{"and_sub_policy":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.AndSubPolicyCfg"},"title":"and_sub_policy","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.AndSubPolicyCfg":{"additionalProperties":false,"description":"AndSubPolicyCfg holds the common configuration to all policies under and policy.","properties":{"boolean_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.BooleanAttributeCfg","title":"boolean_attribute"},"latency":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.LatencyCfg","title":"latency"},"name":{"title":"name","type":"string"},"numeric_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.NumericAttributeCfg","title":"numeric_attribute"},"ottl_condition":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.OTTLConditionCfg","title":"ottl_condition"},"probabilistic":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.ProbabilisticCfg","title":"probabilistic"},"rate_limiting":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.RateLimitingCfg","title":"rate_limiting"},"span_count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.SpanCountCfg","title":"span_count"},"status_code":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StatusCodeCfg","title":"status_code"},"string_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StringAttributeCfg","title":"string_attribute"},"trace_state":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.TraceStateCfg","title":"trace_state"},"type":{"title":"type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.BooleanAttributeCfg":{"additionalProperties":false,"description":"BooleanAttributeCfg holds the configurable settings to create a boolean attribute filter sampling policy evaluator.","properties":{"key":{"description":"Tag that the filter is going to be matching against.","title":"key","type":"string"},"value":{"description":"Value indicate the bool value, either true or false to use when matching against attribute values.\nBooleanAttribute Policy will apply exact value match on Value","title":"value","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.CompositeCfg":{"additionalProperties":false,"description":"CompositeCfg holds the configurable settings to create a composite sampling policy 
evaluator.","properties":{"composite_sub_policy":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.CompositeSubPolicyCfg"},"title":"composite_sub_policy","type":"array"},"max_total_spans_per_second":{"title":"max_total_spans_per_second","type":"integer"},"policy_order":{"items":{"type":"string"},"title":"policy_order","type":"array"},"rate_allocation":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.RateAllocationCfg"},"title":"rate_allocation","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.CompositeSubPolicyCfg":{"additionalProperties":false,"description":"CompositeSubPolicyCfg holds the common configuration to all policies under composite policy.","properties":{"and":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.AndCfg","description":"Configs for and policy evaluator.","title":"and"},"boolean_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.BooleanAttributeCfg","title":"boolean_attribute"},"latency":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.LatencyCfg","title":"latency"},"name":{"title":"name","type":"string"},"numeric_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.NumericAttributeCfg","title":"numeric_attribute"},"ottl_condition":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.OTTLConditionCfg","title":"ottl_condition"},"probabilistic":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.ProbabilisticCfg","title":"probabilistic"},"rate_limiting":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.RateLimitingCfg","title":"rate_limiting"},"span_count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.SpanCountCfg","title":"span_count"},"status_code":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StatusCodeCfg","title":"status_code"},"string_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StringAttributeCfg","title":"string_attribute"},"trace_state":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.TraceStateCfg","title":"trace_state"},"type":{"title":"type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.Config":{"additionalProperties":false,"description":"Config holds the configuration for tail-based sampling.","properties":{"decision_wait":{"description":"DecisionWait is the desired wait time from the arrival of the first span of\ntrace until the decision about sampling it or not is evaluated.","title":"decision_wait","type":"string"},"expected_new_traces_per_sec":{"description":"ExpectedNewTracesPerSec sets the expected number of new traces sending to the tail sampling processor\nper second. 
This helps with allocating data structures with closer to actual usage size.","title":"expected_new_traces_per_sec","type":"integer"},"num_traces":{"description":"NumTraces is the number of traces kept on memory. Typically most of the data\nof a trace is released after a sampling decision is taken.","title":"num_traces","type":"integer"},"policies":{"description":"PolicyCfgs sets the tail-based sampling policy which makes a sampling decision\nfor a given trace when requested.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.PolicyCfg"},"title":"policies","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.LatencyCfg":{"additionalProperties":false,"description":"LatencyCfg holds the configurable settings to create a latency filter sampling policy evaluator","properties":{"threshold_ms":{"description":"ThresholdMs in milliseconds.","title":"threshold_ms","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.NumericAttributeCfg":{"additionalProperties":false,"description":"NumericAttributeCfg holds the configurable settings to create a numeric attribute filter sampling policy evaluator.","properties":{"key":{"description":"Tag that the filter is going to be matching against.","title":"key","type":"string"},"max_value":{"description":"MaxValue is the maximum value of the attribute to be considered a match.","title":"max_value","type":"integer"},"min_value":{"description":"MinValue is the minimum value of the attribute to be considered a match.","title":"min_value","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.OTTLConditionCfg":{"additionalProperties":false,"description":"OTTLConditionCfg holds the configurable setting to create a OTTL condition filter sampling policy evaluator.","properties":{"error_mode":{"title":"error_mode","type":"string"},"span":{"items":{"type":"string"},"title":"span","type":"array"},"spanevent":{"items":{"type":"string"},"title":"spanevent","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.PolicyCfg":{"additionalProperties":false,"description":"PolicyCfg holds the common configuration to all policies.","properties":{"and":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.AndCfg","description":"Configs for defining and policy","title":"and"},"boolean_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.BooleanAttributeCfg","title":"boolean_attribute"},"composite":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.CompositeCfg","description":"Configs for defining composite 
policy","title":"composite"},"latency":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.LatencyCfg","title":"latency"},"name":{"title":"name","type":"string"},"numeric_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.NumericAttributeCfg","title":"numeric_attribute"},"ottl_condition":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.OTTLConditionCfg","title":"ottl_condition"},"probabilistic":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.ProbabilisticCfg","title":"probabilistic"},"rate_limiting":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.RateLimitingCfg","title":"rate_limiting"},"span_count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.SpanCountCfg","title":"span_count"},"status_code":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StatusCodeCfg","title":"status_code"},"string_attribute":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StringAttributeCfg","title":"string_attribute"},"trace_state":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.TraceStateCfg","title":"trace_state"},"type":{"title":"type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.ProbabilisticCfg":{"additionalProperties":false,"description":"ProbabilisticCfg holds the configurable settings to create a probabilistic sampling policy evaluator.","properties":{"hash_salt":{"description":"HashSalt allows one to configure the hashing salts. This is important in scenarios where multiple layers of collectors\nhave different sampling rates: if they use the same salt all passing one layer may pass the other even if they have\ndifferent sampling rates, configuring different salts avoids that.","title":"hash_salt","type":"string"},"sampling_percentage":{"description":"SamplingPercentage is the percentage rate at which traces are going to be sampled. 
Defaults to zero, i.e. no sampling.\nValues greater than or equal to 100 are treated as \"sample all traces\".","title":"sampling_percentage","type":"number"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.RateAllocationCfg":{"additionalProperties":false,"description":"RateAllocationCfg used within composite policy","properties":{"percent":{"title":"percent","type":"integer"},"policy":{"title":"policy","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.RateLimitingCfg":{"additionalProperties":false,"description":"RateLimitingCfg holds the configurable settings to create a rate limiting sampling policy evaluator.","properties":{"spans_per_second":{"description":"SpansPerSecond sets the limit on the maximum number of spans that can be processed each second.","title":"spans_per_second","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.SpanCountCfg":{"additionalProperties":false,"description":"SpanCountCfg holds the configurable settings to create a Span Count filter sampling policy evaluator","properties":{"max_spans":{"title":"max_spans","type":"integer"},"min_spans":{"description":"Minimum number of spans in a Trace","title":"min_spans","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StatusCodeCfg":{"additionalProperties":false,"description":"StatusCodeCfg holds the configurable settings to create a status code filter sampling policy evaluator.","properties":{"status_codes":{"items":{"type":"string"},"title":"status_codes","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.StringAttributeCfg":{"additionalProperties":false,"description":"StringAttributeCfg holds the configurable settings to create a string attribute filter sampling policy evaluator.","properties":{"cache_max_size":{"description":"CacheMaxSize is the maximum number of attribute entries of LRU Cache that stores the matched result\nfrom the regular expressions defined in Values.\nCacheMaxSize will not be used if EnabledRegexMatching is set to false.","title":"cache_max_size","type":"integer"},"enabled_regex_matching":{"description":"EnabledRegexMatching determines whether attribute values are matched by regexp string.","title":"enabled_regex_matching","type":"boolean"},"invert_match":{"description":"InvertMatch indicates that values or regular expressions must not match against attribute values.\nIf InvertMatch is true and Values is equal to 'acme', all other values will be sampled except 'acme'.\nAlso, if the specified Key does not match on any resource or span attributes, data will be sampled.","title":"invert_match","type":"boolean"},"key":{"description":"Tag that the filter is going to be matching against.","title":"key","type":"string"},"values":{"description":"Values indicate the set of values or regular expressions to use when matching against attribute values.\nStringAttribute Policy will apply exact value match on Values unless EnabledRegexMatching is true.","items":{"type":"string"},"title":"values","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.TraceStateCfg":{"additionalProperties":false,"properties":{"key":{"description":"Tag that the filter is going to be matching 
against.","title":"key","type":"string"},"values":{"description":"Values indicate the set of values to use when matching against trace_state values.","items":{"type":"string"},"title":"values","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.transformprocessor.Config":{"additionalProperties":false,"description":"Config defines the configuration for the processor.","markdownDescription":"# Transform Processor\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, metrics, logs |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n| Warnings | [Unsound Transformations, Identity Conflict, Orphaned Telemetry, Other](#warnings) |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe transform processor modifies telemetry based on configuration using the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl).\n\nFor each signal type, the processor takes a list of statements associated to a [Context type](#contexts) and executes the statements against the incoming telemetry in the order specified in the config.\nEach statement can access and transform telemetry using functions and allow the use of a condition to help decide whether the function should be executed.\n\n- [Config](#config)\n- [Grammar](#grammar)\n- [Contexts](#contexts)\n- [Supported functions](#supported-functions)\n- [Examples](#examples)\n- [Contributing](#contributing)\n\n## Config\n\nThe transform processor allows configuring multiple context statements for traces, metrics, and logs.\nThe value of `context` specifies which [OTTL Context](#contexts) to use when interpreting the associated statements.\nThe statement strings, which must be OTTL compatible, will be passed to the OTTL and interpreted using the associated context. \nEach context will be processed in the order specified and each statement for a context will be executed in the order specified.\n\nThe transform processor also allows configuring an optional field, `error_mode`, which will determine how the processor reacts to errors that occur while processing a statement.\n\n| error_mode | description |\n|-----------------------|----------------------------------------------------------------------------------------------------------------------------|\n| ignore | The processor ignores errors returned by statements and continues on to the next statement. This is the recommended mode. |\n| propagate | The processor returns the error up the pipeline. This will result in the payload being dropped from the collector. |\n\nIf not specified, `propagate` will be used.\n\n```yaml\ntransform:\n error_mode: ignore\n \u003ctrace|metric|log\u003e_statements:\n - context: string\n statements:\n - string\n - string\n - string\n - context: string\n statements:\n - string\n - string\n - string\n```\n\nProper use of contexts will provide increased performance and capabilities. 
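\n\nFor illustration only, a minimal single-context configuration might look like the sketch below (the attribute key `deployment.environment` is just an example, not a required setting):\n\n```yaml\ntransform:\n  error_mode: ignore\n  trace_statements:\n    - context: span\n      statements:\n        # Backfill a missing attribute on every span; accessing a missing key returns nil.\n        - set(attributes[\"deployment.environment\"], \"staging\") where attributes[\"deployment.environment\"] == nil\n```\n\n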
See [Contexts](#contexts) for more details.\n\nValid values for `context` are:\n\n| Signal | Context Values |\n|-------------------|------------------------------------------------|\n| trace_statements | `resource`, `scope`, `span`, and `spanevent` |\n| metric_statements | `resource`, `scope`, `metric`, and `datapoint` |\n| log_statements | `resource`, `scope`, and `log` |\n\n### Example\n\nThe example takes advantage of context efficiency by grouping transformations with the context which it intends to transform.\nSee [Contexts](#contexts) for more details.\n\nExample configuration:\n```yaml\ntransform:\n error_mode: ignore\n trace_statements:\n - context: resource\n statements:\n - keep_keys(attributes, [\"service.name\", \"service.namespace\", \"cloud.region\", \"process.command_line\"])\n - replace_pattern(attributes[\"process.command_line\"], \"password\\\\=[^\\\\s]*(\\\\s?)\", \"password=***\")\n - limit(attributes, 100, [])\n - truncate_all(attributes, 4096)\n - context: span\n statements:\n - set(status.code, 1) where attributes[\"http.path\"] == \"/health\"\n - set(name, attributes[\"http.route\"])\n - replace_match(attributes[\"http.target\"], \"/user/*/list/*\", \"/user/{userId}/list/{listId}\")\n - limit(attributes, 100, [])\n - truncate_all(attributes, 4096)\n\n metric_statements:\n - context: resource\n statements:\n - keep_keys(attributes, [\"host.name\"])\n - truncate_all(attributes, 4096)\n - context: metric\n statements:\n - set(description, \"Sum\") where type == \"Sum\"\n - context: datapoint\n statements:\n - limit(attributes, 100, [\"host.name\"])\n - truncate_all(attributes, 4096)\n - convert_sum_to_gauge() where metric.name == \"system.processes.count\"\n - convert_gauge_to_sum(\"cumulative\", false) where metric.name == \"prometheus_metric\"\n \n log_statements:\n - context: resource\n statements:\n - keep_keys(resource.attributes, [\"service.name\", \"service.namespace\", \"cloud.region\"])\n - context: log\n statements:\n - set(severity_text, \"FAIL\") where body == \"request failed\"\n - replace_all_matches(attributes, \"/user/*/list/*\", \"/user/{userId}/list/{listId}\")\n - replace_all_patterns(attributes, \"/account/\\\\d{4}\", \"/account/{accountId}\")\n - set(body, attributes[\"http.route\"])\n```\n\n## Grammar\n\nYou can learn more in-depth details on the capabilities and limitations of the OpenTelemetry Transformation Language used by the transform processor by reading about its [grammar](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl#grammar).\n\n## Contexts\n\nThe transform processor utilizes the OTTL's contexts to transform Resource, Scope, Span, SpanEvent, Metric, DataPoint, and Log telemetry.\nThe contexts allow the OTTL to interact with the underlying telemetry data in its pdata form.\n\n- [Resource Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlresource)\n- [Scope Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlscope)\n- [Span Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspan) \u003c!-- markdown-link-check-disable-line --\u003e\n- [SpanEvent Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspanevent)\n- [Metric Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlmetric)\n- [DataPoint 
Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottldatapoint) \u003c!-- markdown-link-check-disable-line --\u003e\n- [Log Context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottllog) \u003c!-- markdown-link-check-disable-line --\u003e\n\nEach context allows transformation of its type of telemetry. \nFor example, statements associated to a `resource` context will be able to transform the resource's `attributes` and `dropped_attributes_count`.\n\nContexts __NEVER__ supply access to individual items \"lower\" in the protobuf definition.\n- This means statements associated to a `resource` __WILL NOT__ be able to access the underlying instrumentation scopes.\n- This means statements associated to a `scope` __WILL NOT__ be able to access the underlying telemetry slices (spans, metrics, or logs).\n- Similarly, statements associated to a `metric` __WILL NOT__ be able to access individual datapoints, but can access the entire datapoints slice.\n- Similarly, statements associated to a `span` __WILL NOT__ be able to access individual SpanEvents, but can access the entire SpanEvents slice.\n\nFor practical purposes, this means that a context cannot make decisions on its telemetry based on telemetry \"lower\" in the structure.\nFor example, __the following context statement is not possible__ because it attempts to use individual datapoint attributes in the condition of a statement that is associated to a `metric`:\n\n```yaml\nmetric_statements:\n- context: metric\n statements:\n - set(description, \"test passed\") where datapoints.attributes[\"test\"] == \"pass\"\n```\n\nContexts __ALWAYS__ supply access to the items \"higher\" in the protobuf definition that are associated to the telemetry being transformed.\n- This means that statements associated to a `datapoint` have access to a datapoint's metric, instrumentation scope, and resource.\n- This means that statements associated to a `spanevent` have access to a spanevent's span, instrumentation scope, and resource.\n- This means that statements associated to a `span`/`metric`/`log` have access to the telemetry's instrumentation scope, and resource.\n- This means that statements associated to a `scope` have access to the scope's resource.\n\nFor example, __the following context statement is possible__ because `datapoint` statements can access the datapoint's metric.\n\n```yaml\nmetric_statements:\n- context: datapoint\n statements:\n - set(metric.description, \"test passed\") where attributes[\"test\"] == \"pass\"\n```\n\nWhenever possible, associate your statements with the context that they intend to transform.\nAlthough you can modify resource attributes associated to a span using the `span` context, it is more efficient to use the `resource` context.\nThis is because contexts are nested: the efficiency comes because higher-level contexts can avoid iterating through any of the contexts at a lower level. \n\n## Supported functions\n\nSince the transform processor utilizes the OTTL's contexts for Traces, Metrics, and Logs, it is able to utilize functions that expect pdata in addition to any common functions. 
These common functions can be used for any signal.\n\u003c!-- markdown-link-check-disable-next-line --\u003e\n- [OTTL Functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs)\n\nIn addition to OTTL functions, the processor defines its own functions to help with transformations specific to this processor:\n\n**Metrics only functions**\n- [convert_sum_to_gauge](#convert_sum_to_gauge)\n- [convert_gauge_to_sum](#convert_gauge_to_sum)\n- [convert_summary_count_val_to_sum](#convert_summary_count_val_to_sum)\n- [convert_summary_sum_val_to_sum](#convert_summary_sum_val_to_sum)\n\n### convert_sum_to_gauge\n\n`convert_sum_to_gauge()`\n\nConverts incoming metrics of type \"Sum\" to type \"Gauge\", retaining the metric's datapoints. Noop for metrics that are not of type \"Sum\".\n\n**NOTE:** This function may cause a metric to break semantics for [Gauge metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#gauge). Use at your own risk.\n\nExamples:\n\n- `convert_sum_to_gauge()`\n\n### convert_gauge_to_sum\n\n`convert_gauge_to_sum(aggregation_temporality, is_monotonic)`\n\nConverts incoming metrics of type \"Gauge\" to type \"Sum\", retaining the metric's datapoints and setting its aggregation temporality and monotonicity accordingly. Noop for metrics that are not of type \"Gauge\".\n\n`aggregation_temporality` is a string (`\"cumulative\"` or `\"delta\"`) that specifies the resultant metric's aggregation temporality. `is_monotonic` is a boolean that specifies the resultant metric's monotonicity. \n\n**NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk.\n\nExamples:\n\n- `convert_gauge_to_sum(\"cumulative\", false)`\n\n\n- `convert_gauge_to_sum(\"delta\", true)`\n\n### convert_summary_count_val_to_sum\n\n`convert_summary_count_val_to_sum(aggregation_temporality, is_monotonic)`\n\nThe `convert_summary_count_val_to_sum` function creates a new Sum metric from a Summary's count value.\n\n`aggregation_temporality` is a string (`\"cumulative\"` or `\"delta\"`) representing the desired aggregation temporality of the new metric. `is_monotonic` is a boolean representing the monotonicity of the new metric.\n\nThe name for the new metric will be `\u003csummary metric name\u003e_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply.\n\n**NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk.\n\nExamples:\n\n- `convert_summary_count_val_to_sum(\"delta\", true)`\n\n\n- `convert_summary_count_val_to_sum(\"cumulative\", false)`\n\n### convert_summary_sum_val_to_sum\n\n`convert_summary_sum_val_to_sum(aggregation_temporality, is_monotonic)`\n\nThe `convert_summary_sum_val_to_sum` function creates a new Sum metric from a Summary's sum value.\n\n`aggregation_temporality` is a string (`\"cumulative\"` or `\"delta\"`) representing the desired aggregation temporality of the new metric. 
`is_monotonic` is a boolean representing the monotonicity of the new metric.\n\nThe name for the new metric will be `\u003csummary metric name\u003e_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply.\n\n**NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk.\n\nExamples:\n\n- `convert_summary_sum_val_to_sum(\"delta\", true)`\n\n\n- `convert_summary_sum_val_to_sum(\"cumulative\", false)`\n\n## Examples\n\n### Perform transformation if field does not exist\nSet attribute `test` to `\"pass\"` if the attribute `test` does not exist:\n```yaml\ntransform:\n error_mode: ignore\n trace_statements:\n - context: span\n statements:\n # accessing a map with a key that does not exist will return nil. \n - set(attributes[\"test\"], \"pass\") where attributes[\"test\"] == nil\n``` \n\n### Rename attribute\nThere are two ways to rename an attribute key:\n\nYou can either set a new attribute and delete the old:\n\n```yaml\ntransform:\n error_mode: ignore\n trace_statements:\n - context: resource\n statements:\n - set(attributes[\"namespace\"], attributes[\"k8s.namespace.name\"])\n - delete_key(attributes, \"k8s.namespace.name\") \n``` \n\nOr you can update the key using regex:\n\n```yaml\ntransform:\n error_mode: ignore\n trace_statements:\n - context: resource\n statements:\n - replace_all_patterns(attributes, \"key\", \"k8s\\\\.namespace\\\\.name\", \"namespace\")\n``` \n\n### Combine two attributes\nSet attribute `test` to the value of attributes `\"foo\"` and `\"bar\"` combined. 
\n```yaml\ntransform:\n error_mode: ignore\n trace_statements:\n - context: resource\n statements:\n # Use the Concat function to combine any number of strings, separated by a delimiter.\n - set(attributes[\"test\"], Concat([attributes[\"foo\"], attributes[\"bar\"]], \" \"))\n```\n\n### Parsing JSON logs\n\nGiven the following JSON body\n\n```json\n{\n \"name\": \"log\",\n \"attr1\": \"foo\",\n \"attr2\": \"bar\",\n \"nested\": {\n \"attr3\": \"example\"\n }\n}\n```\n\nadd specific fields as attributes on the log:\n\n```yaml\ntransform:\n error_mode: ignore\n log_statements:\n - context: log\n statements:\n # Parse body as JSON and merge the resulting map with the cache map, ignoring non-json bodies.\n # cache is a field exposed by OTTL that is a temporary storage place for complex operations.\n - merge_maps(cache, ParseJSON(body), \"upsert\") where IsMatch(body, \"^\\\\{\") \n \n # Set attributes using the values merged into cache.\n # If the attribute doesn't exist in cache then nothing happens.\n - set(attributes[\"attr1\"], cache[\"attr1\"])\n - set(attributes[\"attr2\"], cache[\"attr2\"])\n \n # To access nested maps you can chain index ([]) operations.\n # If nested or attr3 do not exist in cache then nothing happens.\n - set(attributes[\"nested.attr3\"], cache[\"nested\"][\"attr3\"])\n```\n\n## Contributing\n\nSee [CONTRIBUTING.md](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/CONTRIBUTING.md).\n\n\n## Warnings\n\nThe transform processor's implementation of the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md#opentelemetry-transformation-language) (OTTL) allows users to modify all aspects of their telemetry. Some specific risks are listed below, but this is not an exhaustive list. In general, understand your data before using the transform processor. \n\n- [Unsound Transformations](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#unsound-transformations): Several Metric-only functions allow you to transform one metric data type to another or create new metrics from existing metrics. Transformations between metric data types are not defined in the [metrics data model](https://github.com/open-telemetry/opentelemetry-specification/blob/main//specification/metrics/data-model.md). These functions have the expectation that you understand the incoming data and know that it can be meaningfully converted to a new metric data type or can meaningfully be used to create new metrics.\n - Although the OTTL allows the `set` function to be used with `metric.data_type`, its implementation in the transform processor is NOOP. To modify a data type you must use a function specific to that purpose.\n- [Identity Conflict](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#identity-conflict): Transformations of metrics have the potential to affect the identity of a metric leading to an Identity Crisis. Be especially cautious when transforming metric name and when reducing/changing existing attributes. Adding new attributes is safe.\n- [Orphaned Telemetry](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#orphaned-telemetry): The processor allows you to modify `span_id`, `trace_id`, and `parent_span_id` for traces, and `span_id` and `trace_id` for logs. 
Modifying these fields could lead to orphaned spans or logs.","properties":{"error_mode":{"description":"ErrorMode determines how the processor reacts to errors that occur while processing a statement.\nValid values are `ignore` and `propagate`.\n`ignore` means the processor ignores errors returned by statements and continues on to the next statement. This is the recommended mode.\n`propagate` means the processor returns the error up the pipeline. This will result in the payload being dropped from the collector.\nThe default value is `propagate`.","title":"error_mode","type":"string"},"log_statements":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.transformprocessor.internal.common.ContextStatements"},"title":"log_statements","type":"array"},"metric_statements":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.transformprocessor.internal.common.ContextStatements"},"title":"metric_statements","type":"array"},"trace_statements":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.transformprocessor.internal.common.ContextStatements"},"title":"trace_statements","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.processor.transformprocessor.internal.common.ContextStatements":{"additionalProperties":false,"properties":{"context":{"title":"context","type":"string"},"statements":{"items":{"type":"string"},"title":"statements","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Active Directory Domain Services Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `active_directory_ds` receiver scrapes metrics relating to an Active Directory domain controller using the Windows Performance Counters.\n\n## Configuration\nThe following settings are optional:\n- `metrics` (default: see `DefaultMetricsSettings` [here](./internal/metadata/generated_metrics.go)): Allows enabling and disabling specific metrics from being collected in this receiver.\n- `collection_interval` (default = `10s`): The interval at which metrics are emitted by this receiver.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\nExample:\n```yaml\nreceivers:\n active_directory_ds:\n collection_interval: 10s\n metrics:\n # Disable the active_directory.ds.replication.network.io metric from being emitted\n active_directory.ds.replication.network.io: false\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in 
[metadata.yaml](./metadata.yaml)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricsConfig","title":"metrics"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for active_directory_ds metrics.","properties":{"active_directory.ds.bind.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.bind.rate"},"active_directory.ds.ldap.bind.last_successful.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.ldap.bind.last_successful.time"},"active_directory.ds.ldap.bind.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.ldap.bind.rate"},"active_directory.ds.ldap.client.session.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.ldap.client.session.count"},"active_directory.ds.ldap.search.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.ldap.search.rate"},"active_directory.ds.name_cache.hit_rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.name_cache.hit_rate"},"active_directory.ds.notification.queued":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.notification.queued"},"active_directory.ds.operation.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.operation.rate"},"active_directory.ds.replication.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.network.io"},"active_directory.ds.replication.object.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.object.rate"},"active_directory.ds.replication.operation.pending":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.operation.pending"},"active_directory.ds
.replication.property.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.property.rate"},"active_directory.ds.replication.sync.object.pending":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.sync.object.pending"},"active_directory.ds.replication.sync.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.sync.request.count"},"active_directory.ds.replication.value.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.replication.value.rate"},"active_directory.ds.security_descriptor_propagations_event.queued":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.security_descriptor_propagations_event.queued"},"active_directory.ds.suboperation.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.suboperation.rate"},"active_directory.ds.thread.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.internal.metadata.MetricConfig","title":"active_directory.ds.thread.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.Config":{"additionalProperties":false,"description":"Config is the receiver configuration","properties":{"collect_cluster_metrics":{"title":"collect_cluster_metrics","type":"boolean"},"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"},"tlsname":{"title":"tlsname","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for aerospike 
metrics.","properties":{"aerospike.namespace.disk.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.disk.available"},"aerospike.namespace.geojson.region_query_cells":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.geojson.region_query_cells"},"aerospike.namespace.geojson.region_query_false_positive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.geojson.region_query_false_positive"},"aerospike.namespace.geojson.region_query_points":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.geojson.region_query_points"},"aerospike.namespace.geojson.region_query_requests":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.geojson.region_query_requests"},"aerospike.namespace.memory.free":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.memory.free"},"aerospike.namespace.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.memory.usage"},"aerospike.namespace.query.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.query.count"},"aerospike.namespace.scan.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.scan.count"},"aerospike.namespace.transaction.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.namespace.transaction.count"},"aerospike.node.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.node.connection.count"},"aerospike.node.connection.open":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.node.connection.open"},"aerospike.node.memory.free":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.node.memory.free"},"aerospike.node.query.tracked":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.MetricConfig","title":"aerospike.node.query.tracked"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource 
attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for aerospike resource attributes.","properties":{"aerospike.namespace":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.ResourceAttributeConfig","title":"aerospike.namespace"},"aerospike.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.internal.metadata.ResourceAttributeConfig","title":"aerospike.node.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.Config":{"additionalProperties":false,"markdownDescription":"# Apache Web Server Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches stats from an Apache Web Server instance using the `server-status?auto` endpoint.\n\n## Prerequisites\n\nThis receiver supports Apache Web Server version 2.4+\n\n### mod_status module\n\nIn order to receive server statistics, you must configure the server's `httpd.conf` file to [enable status support](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n\n\n### Configuration\n\nThe following settings are required:\n- `endpoint` (default: `http://localhost:8080/server-status?auto`): The URL of the httpd status endpoint\n\nThe following settings are optional:\n- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). 
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\n### Example Configuration\n\n```yaml\nreceivers:\n  apache:\n    endpoint: \"http://localhost:8080/server-status?auto\"\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for apache metrics.","properties":{"apache.cpu.load":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.cpu.load"},"apache.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.cpu.time"},"apache.current_connections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.current_connections"},"apache.load.1":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.load.1"},"apache.load.15":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.load.15"},"apache.load.5":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.load.5"},"apache.request.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.request.time"},"apache.requests":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.requests"},"apache.scoreboard":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.scoreboard"},"apache.traffic":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.traffic"},"apache.uptime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.uptime"},"apache.workers":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.MetricConfig","title":"apache.workers"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":
false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for apache resource attributes.","properties":{"apache.server.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.ResourceAttributeConfig","title":"apache.server.name"},"apache.server.port":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.internal.metadata.ResourceAttributeConfig","title":"apache.server.port"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# Apache Spark Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches metrics for an Apache Spark cluster through the Apache Spark REST API - specifically, the /metrics/json, /api/v1/applications/[app-id]/stages, /api/v1/applications/[app-id]/executors, and /api/v1/applications/[app-id]/jobs endpoints.\n\n## Purpose\n\nThe purpose of this component is to allow monitoring of Apache Spark clusters and the applications running on them through the collection of performance metrics like memory utilization, CPU utilization, shuffle operations, garbage collection time, I/O operations, and more.\n\n## Prerequisites\n\nThis receiver supports Apache Spark versions:\n\n- 3.3.2+\n\n## Configuration\n\nThese configuration options are for connecting to an Apache Spark application.\n\nThe following settings are optional:\n\n- `collection_interval`: (default = `60s`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `endpoint`: (default = `http://localhost:4040`): Apache Spark endpoint to connect to in the form of `[http][://]{host}[:{port}]`\n- `application_names`: An array of Spark application names for which metrics should be collected. 
If no application names are specified, metrics will be collected for all Spark applications running on the cluster at the specified endpoint.\n\n### Example Configuration\n\n```yaml\nreceivers:\n  apachespark:\n    collection_interval: 60s\n    endpoint: http://localhost:4040\n    application_names:\n      - PythonStatusAPIDemo\n      - PythonLR\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"application_names":{"items":{"type":"string"},"title":"application_names","type":"array"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for apachespark metrics.","properties":{"spark.driver.block_manager.disk.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.block_manager.disk.usage"},"spark.driver.block_manager.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.block_manager.memory.usage"},"spark.driver.code_generator.compilation.average_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.compilation.average_time"},"spark.driver.code_generator.compilation.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.compilation.count"},"spark.driver.code_generator.generated_class.average_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.generated_class.average_size"},"spark.driver.code_generator.generated_class.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.generated_class.count"},"spark.driver.code_generator.generated_method.average_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.generated_method.average_size"},"spark.driver.code_generator.generated_method.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.generated_method.count"},"spark.driver.code_generator.source_code.average_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.source_code.average_size"},"spark.driver.code_generator.source_code.op
erations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.code_generator.source_code.operations"},"spark.driver.dag_scheduler.job.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.dag_scheduler.job.active"},"spark.driver.dag_scheduler.job.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.dag_scheduler.job.count"},"spark.driver.dag_scheduler.stage.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.dag_scheduler.stage.count"},"spark.driver.dag_scheduler.stage.failed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.dag_scheduler.stage.failed"},"spark.driver.executor.gc.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.executor.gc.operations"},"spark.driver.executor.gc.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.executor.gc.time"},"spark.driver.executor.memory.execution":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.executor.memory.execution"},"spark.driver.executor.memory.jvm":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.executor.memory.jvm"},"spark.driver.executor.memory.pool":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.executor.memory.pool"},"spark.driver.executor.memory.storage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.executor.memory.storage"},"spark.driver.hive_external_catalog.file_cache_hits":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.hive_external_catalog.file_cache_hits"},"spark.driver.hive_external_catalog.files_discovered":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.hive_external_catalog.files_discovered"},"spark.driver.hive_external_catalog.hive_client_calls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.hive_external_catalog.hive_client_calls"},"spark.driver.hive_external_catalog.parallel_listing_jobs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.hive_external_catalog.parallel_listing_jobs"},"spark.driver.hive_external_catalog.partitions_fetched":{"$ref":"#/$defs/github.com.open-telemetry.openteleme
try-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.hive_external_catalog.partitions_fetched"},"spark.driver.jvm_cpu_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.jvm_cpu_time"},"spark.driver.live_listener_bus.dropped":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.live_listener_bus.dropped"},"spark.driver.live_listener_bus.posted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.live_listener_bus.posted"},"spark.driver.live_listener_bus.processing_time.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.live_listener_bus.processing_time.average"},"spark.driver.live_listener_bus.queue_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.driver.live_listener_bus.queue_size"},"spark.executor.disk.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.disk.usage"},"spark.executor.gc_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.gc_time"},"spark.executor.input_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.input_size"},"spark.executor.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.memory.usage"},"spark.executor.shuffle.io.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.shuffle.io.size"},"spark.executor.storage_memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.storage_memory.usage"},"spark.executor.task.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.task.active"},"spark.executor.task.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.task.limit"},"spark.executor.task.result":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.task.result"},"spark.executor.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.executor.time"},"spark.job.stage.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.job.stage.active"},"spark.job.stage.result":{"$ref":"#/$
defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.job.stage.result"},"spark.job.task.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.job.task.active"},"spark.job.task.result":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.job.task.result"},"spark.stage.disk.spilled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.disk.spilled"},"spark.stage.executor.cpu_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.executor.cpu_time"},"spark.stage.executor.run_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.executor.run_time"},"spark.stage.io.records":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.io.records"},"spark.stage.io.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.io.size"},"spark.stage.jvm_gc_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.jvm_gc_time"},"spark.stage.memory.peak":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.memory.peak"},"spark.stage.memory.spilled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.memory.spilled"},"spark.stage.shuffle.blocks_fetched":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.blocks_fetched"},"spark.stage.shuffle.fetch_wait_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.fetch_wait_time"},"spark.stage.shuffle.io.disk":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.io.disk"},"spark.stage.shuffle.io.read.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.io.read.size"},"spark.stage.shuffle.io.records":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.io.records"},"spark.stage.shuffle.io.write.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.io.write.size"},"spark.stage.shuffle.write_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesp
arkreceiver.internal.metadata.MetricConfig","title":"spark.stage.shuffle.write_time"},"spark.stage.status":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.status"},"spark.stage.task.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.task.active"},"spark.stage.task.result":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.task.result"},"spark.stage.task.result_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.MetricConfig","title":"spark.stage.task.result_size"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for apachespark resource attributes.","properties":{"spark.application.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig","title":"spark.application.id"},"spark.application.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig","title":"spark.application.name"},"spark.executor.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig","title":"spark.executor.id"},"spark.job.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig","title":"spark.job.id"},"spark.stage.attempt.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig","title":"spark.stage.attempt.id"},"spark.stage.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.internal.metadata.ResourceAttributeConfig","title":"spark.stage.id"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.AutodiscoverConfig":{"additionalProperties":false,"description":"AutodiscoverConfig is the configuration for the autodiscovery functionality of log groups","properties":{"limit":{"title":"limit","type":"integer"},"prefix":{"title":"prefix","type":"string"},"streams":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.StreamConfig","title":"streams"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.Config":{"additionalProperties":false,"description":"Config is the overall config structure for the awscloudwatchreceiver","markdownDescription":"# Cloudwatch Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| 
------------- |-----------|\n| Stability | [alpha]: logs |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nReceives Cloudwatch events from [AWS Cloudwatch](https://aws.amazon.com/cloudwatch/) via the [AWS SDK for Cloudwatch Logs](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/)\n\n## Getting Started\n\nThis receiver uses the [AWS SDK](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) as its mode of authentication, which includes [Credentials File](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) and [IMDS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) authentication for EC2 instances.\n\n## Configuration\n\n### Top Level Parameters\n\n| Parameter | Notes | type | Description |\n| --------------- | ---------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| `region` | *required* | string | The AWS recognized region string |\n| `profile` | *optional* | string | The AWS profile used to authenticate, if none is specified the default is chosen from the list of profiles |\n| `imds_endpoint` | *optional* | string | A way of specifying a custom URL to be used by the EC2 IMDS client to validate the session. If unset, and the environment variable `AWS_EC2_METADATA_SERVICE_ENDPOINT` has a value the client will use the value of the environment variable as the endpoint for operation calls. |\n| `logs` | *optional* | `Logs` | Configuration for Logs ingestion of this receiver |\n\n### Logs Parameters\n\n| Parameter | Notes | type | Description |\n| ------------------------ | ------------ | ---------------------- | ------------------------------------------------------------------------------------------ |\n| `poll_interval` | `default=1m` | duration | The duration waiting in between requests. |\n| `max_events_per_request` | `default=50` | int | The maximum number of events to process per request to Cloudwatch |\n| `groups` | *optional* | `See Group Parameters` | Configuration for Log Groups, by default all Log Groups and Log Streams will be collected. |\n\n### Group Parameters\n\n`autodiscover` and `named` are ways to control and filter which log groups and log streams are collected from. They are mutually exclusive and cannot be configured at the same time.\n\n- `autodiscover`\n  - `limit`: (optional; default = 50) Limits the number of discovered log groups. 
This does not limit how large each API call to discover the log groups will be.\n  - `prefix`: (optional) A prefix for log groups to limit the number of log groups discovered.\n    - if omitted, events are collected from all log streams up to the limit\n  - `streams`: (optional) If `streams` is omitted, events will be retrieved from all streams.\n    - `names`: A list of full log stream names to filter the discovered log groups to collect from.\n    - `prefixes`: A list of prefixes to filter the discovered log groups to collect from.\n- `named`\n  - This is a map of log group name to stream filtering options\n  - `streams`: (optional)\n    - `names`: A list of full log stream names to filter the discovered log groups to collect from.\n    - `prefixes`: A list of prefixes to filter the discovered log groups to collect from.\n\n#### Autodiscovery Example Configuration\n\n```yaml\nawscloudwatch:\n  region: us-west-1\n  logs:\n    poll_interval: 1m\n    groups:\n      autodiscover:\n        limit: 100\n        prefix: /aws/eks/\n        streams:\n          prefixes: [kube-api-controller]\n```\n\n#### Named Example\n\n```yaml\nawscloudwatch:\n  region: us-west-1\n  logs:\n    poll_interval: 5m\n    groups:\n      named:\n        /aws/eks/dev-0/cluster:\n          names: [kube-apiserver-ea9c831555adca1815ae04b87661klasdj]\n```\n\n## Sample Configs\n\nThis receiver has a number of sample configs for reference.\n\n1. [Default](./testdata/sample-configs/default.yaml)\n\n   - Minimal configuration of the receiver\n   - Performs autodiscovery\n   - Collects all log groups and log streams\n\n2. [Autodiscover Filtering Log Groups](./testdata/sample-configs/autodiscover-filter-groups.yaml)\n\n   - Performs autodiscovery\n   - Only collects log groups matching a prefix\n   - Limits the number of discovered Log Groups\n\n3. [Autodiscover Filtering Log Streams](./testdata/sample-configs/autodiscover-filter-streams.yaml)\n\n   - Performs autodiscovery for all Log Groups\n   - Filters log streams\n\n4. [Named Groups](./testdata/sample-configs/named-prefix.yaml)\n\n   - Specifies and only collects from the desired Log Groups\n   - Does not attempt autodiscovery\n\n5. 
[Named Groups Filter Log Streams](./testdata/sample-configs/named-prefix-streams.yaml)\n\n   - Specifies the names of the log groups to collect\n   - Does not attempt autodiscovery\n   - Only collects from log streams matching a prefix","properties":{"imds_endpoint":{"title":"imds_endpoint","type":"string"},"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.LogsConfig","title":"logs"},"profile":{"title":"profile","type":"string"},"region":{"title":"region","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.GroupConfig":{"additionalProperties":false,"description":"GroupConfig is the configuration for log group collection","properties":{"autodiscover":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.AutodiscoverConfig","title":"autodiscover"},"named":{"patternProperties":{".*":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.StreamConfig"}},"title":"named","type":"object"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.LogsConfig":{"additionalProperties":false,"description":"LogsConfig is the configuration for the logs portion of this receiver","properties":{"groups":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.GroupConfig","title":"groups"},"max_events_per_request":{"title":"max_events_per_request","type":"integer"},"poll_interval":{"title":"poll_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.StreamConfig":{"additionalProperties":false,"description":"StreamConfig represents the configuration for the log stream filtering","properties":{"names":{"items":{"type":"string"},"title":"names","type":"array"},"prefixes":{"items":{"type":"string"},"title":"prefixes","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscontainerinsightreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for aws ecs container metrics receiver.","markdownDescription":"# AWS Container Insights Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [aws], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Overview\n\nAWS Container Insights Receiver (`awscontainerinsightreceiver`) is an AWS-specific receiver that supports [CloudWatch Container Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights.html). CloudWatch Container Insights collects, aggregates, \nand summarizes metrics and logs from your containerized applications and microservices. Data are collected as performance log events \nusing [embedded metric format](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format.html). 
From the EMF data, Amazon CloudWatch can create the aggregated CloudWatch metrics at the cluster, node, pod, task, and service level.\n\nCloudWatch Container Insights has been supported by [ECS Agent](https://github.com/aws/amazon-ecs-agent) and [CloudWatch Agent](https://github.com/aws/amazon-cloudwatch-agent) to collect infrastructure metrics for many resources such as CPU, memory, disk, and network. To migrate existing customers to use OpenTelemetry, AWS Container Insights Receiver (together with CloudWatch EMF Exporter) aims to support the same CloudWatch Container Insights experience for the following platforms: \n * Amazon ECS \n * Amazon EKS\n * Kubernetes platforms on Amazon EC2\n\n## Design of AWS Container Insights Receiver\n\nSee the [design doc](./design.md)\n\n## Configuration\nExample configuration:\n```\nreceivers:\n  awscontainerinsightreceiver:\n    # all parameters are optional\n    collection_interval: 60s\n    container_orchestrator: eks\n    add_service_as_attribute: true\n    prefer_full_pod_name: false\n    add_full_pod_name_metric_label: false\n```\nThere is no need to provide any parameters since they are all optional. \n\n**collection_interval (optional)**\n\nThe interval at which metrics should be collected. The default is 60 seconds.\n\n**container_orchestrator (optional)**\n\nThe type of container orchestration service, e.g. eks or ecs. The default is eks.\n\n**add_service_as_attribute (optional)**\n\nWhether to add the associated service name as an attribute. The default is true.\n\n**prefer_full_pod_name (optional)**\n\nThe \"PodName\" attribute is set based on the name of the relevant controllers like Daemonset, Job, ReplicaSet, ReplicationController, ... If it cannot be set that way and PrefFullPodName is true, the \"PodName\" attribute is set to the pod's own name. The default value is false.\n\n**add_full_pod_name_metric_label (optional)**\n\nThe \"FullPodName\" attribute is the pod name including suffix. If false, the FullPodName label is not added. 
The default value is false\n\n## Sample configuration for Container Insights \nThis is a sample configuration for AWS Container Insights using the `awscontainerinsightreceiver` and `awsemfexporter` for an EKS cluster:\n```\n# create namespace\napiVersion: v1\nkind: Namespace\nmetadata:\n name: aws-otel-eks\n labels:\n name: aws-otel-eks\n\n---\n# create cwagent service account and role binding\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: aws-otel-sa\n namespace: aws-otel-eks\n\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: aoc-agent-role\nrules:\n - apiGroups: [\"\"]\n resources: [\"pods\", \"nodes\", \"endpoints\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"apps\"]\n resources: [\"replicasets\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"batch\"]\n resources: [\"jobs\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"nodes/proxy\"]\n verbs: [\"get\"]\n - apiGroups: [\"\"]\n resources: [\"nodes/stats\", \"configmaps\", \"events\"]\n verbs: [\"create\", \"get\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n resourceNames: [\"otel-container-insight-clusterleader\"]\n verbs: [\"get\",\"update\"]\n\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: aoc-agent-role-binding\nsubjects:\n - kind: ServiceAccount\n name: aws-otel-sa\n namespace: aws-otel-eks\nroleRef:\n kind: ClusterRole\n name: aoc-agent-role\n apiGroup: rbac.authorization.k8s.io\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: otel-agent-conf\n namespace: aws-otel-eks\n labels:\n app: opentelemetry\n component: otel-agent-conf\ndata:\n otel-agent-config: |\n extensions:\n health_check:\n\n receivers:\n awscontainerinsightreceiver:\n\n processors:\n batch/metrics:\n timeout: 60s\n\n exporters:\n awsemf:\n namespace: ContainerInsights\n log_group_name: '/aws/containerinsights/{ClusterName}/performance'\n log_stream_name: '{NodeName}'\n resource_to_telemetry_conversion:\n enabled: true\n dimension_rollup_option: NoDimensionRollup\n parse_json_encoded_attr_values: [Sources, kubernetes]\n metric_declarations:\n # node metrics\n - dimensions: [[NodeName, InstanceId, ClusterName]]\n metric_name_selectors:\n - node_cpu_utilization\n - node_memory_utilization\n - node_network_total_bytes\n - node_cpu_reserved_capacity\n - node_memory_reserved_capacity\n - node_number_of_running_pods\n - node_number_of_running_containers\n - dimensions: [[ClusterName]]\n metric_name_selectors:\n - node_cpu_utilization\n - node_memory_utilization\n - node_network_total_bytes\n - node_cpu_reserved_capacity\n - node_memory_reserved_capacity\n - node_number_of_running_pods\n - node_number_of_running_containers\n - node_cpu_usage_total\n - node_cpu_limit\n - node_memory_working_set\n - node_memory_limit\n\n # pod metrics\n - dimensions: [[PodName, Namespace, ClusterName], [Service, Namespace, ClusterName], [Namespace, ClusterName], [ClusterName]]\n metric_name_selectors:\n - pod_cpu_utilization\n - pod_memory_utilization\n - pod_network_rx_bytes\n - pod_network_tx_bytes\n - pod_cpu_utilization_over_pod_limit\n - pod_memory_utilization_over_pod_limit\n - dimensions: [[PodName, Namespace, ClusterName], [ClusterName]]\n metric_name_selectors:\n - pod_cpu_reserved_capacity\n - pod_memory_reserved_capacity\n - dimensions: [[PodName, Namespace, ClusterName]]\n metric_name_selectors:\n - pod_number_of_container_restarts\n\n # cluster metrics\n - dimensions: [[ClusterName]]\n metric_name_selectors:\n - cluster_node_count\n - 
cluster_failed_node_count\n\n # service metrics\n - dimensions: [[Service, Namespace, ClusterName], [ClusterName]]\n metric_name_selectors:\n - service_number_of_running_pods\n\n # node fs metrics\n - dimensions: [[NodeName, InstanceId, ClusterName], [ClusterName]]\n metric_name_selectors:\n - node_filesystem_utilization\n\n # namespace metrics\n - dimensions: [[Namespace, ClusterName], [ClusterName]]\n metric_name_selectors:\n - namespace_number_of_running_pods\n\n\n logging:\n loglevel: debug\n\n service:\n pipelines:\n metrics:\n receivers: [awscontainerinsightreceiver]\n processors: [batch/metrics]\n exporters: [awsemf]\n\n extensions: [health_check]\n\n---\n# create Daemonset\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: aws-otel-eks-ci\n namespace: aws-otel-eks\nspec:\n selector:\n matchLabels:\n name: aws-otel-eks-ci\n template:\n metadata:\n labels:\n name: aws-otel-eks-ci\n spec:\n containers:\n - name: aws-otel-collector\n image: {collector-image-url}\n env:\n #- name: AWS_REGION\n # value: \"us-east-1\"\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: HOST_IP\n valueFrom:\n fieldRef:\n fieldPath: status.hostIP\n - name: HOST_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: K8S_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n imagePullPolicy: Always\n command:\n - \"/awscollector\"\n - \"--config=/conf/otel-agent-config.yaml\"\n volumeMounts:\n - name: rootfs\n mountPath: /rootfs\n readOnly: true\n - name: dockersock\n mountPath: /var/run/docker.sock\n readOnly: true\n - name: varlibdocker\n mountPath: /var/lib/docker\n readOnly: true\n - name: containerdsock\n mountPath: /run/containerd/containerd.sock\n readOnly: true\n - name: sys\n mountPath: /sys\n readOnly: true\n - name: devdisk\n mountPath: /dev/disk\n readOnly: true\n - name: otel-agent-config-vol\n mountPath: /conf\n resources:\n limits:\n cpu: 200m\n memory: 200Mi\n requests:\n cpu: 200m\n memory: 200Mi\n volumes:\n - configMap:\n name: otel-agent-conf\n items:\n - key: otel-agent-config\n path: otel-agent-config.yaml\n name: otel-agent-config-vol\n - name: rootfs\n hostPath:\n path: /\n - name: dockersock\n hostPath:\n path: /var/run/docker.sock\n - name: varlibdocker\n hostPath:\n path: /var/lib/docker\n - name: containerdsock\n hostPath:\n path: /run/containerd/containerd.sock\n - name: sys\n hostPath:\n path: /sys\n - name: devdisk\n hostPath:\n path: /dev/disk/\n serviceAccountName: aws-otel-sa\n```\n\nTo deploy to an EKS cluster\n```\nkubectl apply -f config.yaml\n```\n\n## Available Metrics and Resource Attributes\n### Cluster\n| Metric | Unit |\n|---------------------------|-------|\n| cluster_failed_node_count | Count |\n| cluster_node_count | Count |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|--------------------|\n| ClusterName |\n| NodeName |\n| Type |\n| Timestamp |\n| Version |\n| Sources |\n\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Cluster Namespace\n| Metric | Unit |\n|----------------------------------|-------|\n| namespace_number_of_running_pods | Count |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|--------------------|\n| ClusterName |\n| NodeName |\n| Namespace |\n| Type |\n| Timestamp |\n| Version |\n| Sources |\n| kubernete |\n\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Cluster Service\n| Metric | Unit |\n|--------------------------------|-------|\n| service_number_of_running_pods | Count 
|\n\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|--------------------|\n| ClusterName |\n| NodeName |\n| Namespace |\n| Service |\n| Type |\n| Timestamp |\n| Version |\n| Sources |\n| kubernete |\n\n\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Node\n| Metric | Unit |\n|-------------------------------------|---------------|\n| node_cpu_limit | Millicore |\n| node_cpu_request | Millicore |\n| node_cpu_reserved_capacity | Percent |\n| node_cpu_usage_system | Millicore |\n| node_cpu_usage_total | Millicore |\n| node_cpu_usage_user | Millicore |\n| node_cpu_utilization | Percent |\n| node_memory_cache | Bytes |\n| node_memory_failcnt | Count |\n| node_memory_hierarchical_pgfault | Count/Second |\n| node_memory_hierarchical_pgmajfault | Count/Second |\n| node_memory_limit | Bytes |\n| node_memory_mapped_file | Bytes |\n| node_memory_max_usage | Bytes |\n| node_memory_pgfault | Count/Second |\n| node_memory_pgmajfault | Count/Second |\n| node_memory_request | Bytes |\n| node_memory_reserved_capacity | Percent |\n| node_memory_rss | Bytes |\n| node_memory_swap | Bytes |\n| node_memory_usage | Bytes |\n| node_memory_utilization | Percent |\n| node_memory_working_set | Bytes |\n| node_network_rx_bytes | Bytes/Second |\n| node_network_rx_dropped | Count/Second |\n| node_network_rx_errors | Count/Second |\n| node_network_rx_packets | Count/Second |\n| node_network_total_bytes | Bytes/Second |\n| node_network_tx_bytes | Bytes/Second |\n| node_network_tx_dropped | Count/Second |\n| node_network_tx_errors | Count/Second |\n| node_network_tx_packets | Count/Second |\n| node_number_of_running_containers | Count |\n| node_number_of_running_pods | Count |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|----------------------|\n| ClusterName |\n| InstanceType |\n| NodeName |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| kubernete |\n\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Node Disk IO\n| Metric | Unit |\n|------------------------------------|---------------|\n| node_diskio_io_serviced_async | Count/Second |\n| node_diskio_io_serviced_read | Count/Second |\n| node_diskio_io_serviced_sync | Count/Second |\n| node_diskio_io_serviced_total | Count/Second |\n| node_diskio_io_serviced_write | Count/Second |\n| node_diskio_io_service_bytes_async | Bytes/Second |\n| node_diskio_io_service_bytes_read | Bytes/Second |\n| node_diskio_io_service_bytes_sync | Bytes/Second |\n| node_diskio_io_service_bytes_total | Bytes/Second |\n| node_diskio_io_service_bytes_write | Bytes/Second |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|----------------------|\n| AutoScalingGroupName |\n| ClusterName |\n| InstanceId |\n| InstanceType |\n| NodeName |\n| Timestamp |\n| EBSVolumeId |\n| device |\n| Type |\n| Version |\n| Sources |\n| kubernete |\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Node Filesystem\n| Metric | Unit |\n|-----------------------------|---------|\n| node_filesystem_available | Bytes |\n| node_filesystem_capacity | Bytes |\n| node_filesystem_inodes | Count |\n| node_filesystem_inodes_free | Count |\n| node_filesystem_usage | Bytes |\n| node_filesystem_utilization | Percent |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|----------------------|\n| AutoScalingGroupName |\n| ClusterName |\n| InstanceId |\n| InstanceType |\n| NodeName |\n| Timestamp |\n| EBSVolumeId |\n| device |\n| fstype |\n| Type |\n| Version |\n| Sources |\n| kubernete 
|\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Node Network\n| Metric | Unit |\n|------------------------------------|--------------|\n| node_interface_network_rx_bytes | Bytes/Second |\n| node_interface_network_rx_dropped | Count/Second |\n| node_interface_network_rx_errors | Count/Second |\n| node_interface_network_rx_packets | Count/Second |\n| node_interface_network_total_bytes | Bytes/Second |\n| node_interface_network_tx_bytes | Bytes/Second |\n| node_interface_network_tx_dropped | Count/Second |\n| node_interface_network_tx_errors | Count/Second |\n| node_interface_network_tx_packets | Count/Second |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|----------------------|\n| AutoScalingGroupName |\n| ClusterName |\n| InstanceId |\n| InstanceType |\n| NodeName |\n| Timestamp |\n| Type |\n| Version |\n| interface |\n| Sources |\n| kubernete |\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n### Pod\n| Metric | Unit |\n|---------------------------------------|---------------|\n| pod_cpu_limit | Millicore |\n| pod_cpu_request | Millicore |\n| pod_cpu_reserved_capacity | Percent |\n| pod_cpu_usage_system | Millicore |\n| pod_cpu_usage_total | Millicore |\n| pod_cpu_usage_user | Millicore |\n| pod_cpu_utilization | Percent |\n| pod_cpu_utilization_over_pod_limit | Percent |\n| pod_memory_cache | Bytes |\n| pod_memory_failcnt | Count |\n| pod_memory_hierarchical_pgfault | Count/Second |\n| pod_memory_hierarchical_pgmajfault | Count/Second |\n| pod_memory_limit | Bytes |\n| pod_memory_mapped_file | Bytes |\n| pod_memory_max_usage | Bytes |\n| pod_memory_pgfault | Count/Second |\n| pod_memory_pgmajfault | Count/Second |\n| pod_memory_request | Bytes |\n| pod_memory_reserved_capacity | Percent |\n| pod_memory_rss | Bytes |\n| pod_memory_swap | Bytes |\n| pod_memory_usage | Bytes |\n| pod_memory_utilization | Percent |\n| pod_memory_utilization_over_pod_limit | Percent |\n| pod_memory_working_set | Bytes |\n| pod_network_rx_bytes | Bytes/Second |\n| pod_network_rx_dropped | Count/Second |\n| pod_network_rx_errors | Count/Second |\n| pod_network_rx_packets | Count/Second |\n| pod_network_total_bytes | Bytes/Second |\n| pod_network_tx_bytes | Bytes/Second |\n| pod_network_tx_dropped | Count/Second |\n| pod_network_tx_errors | Count/Second |\n| pod_network_tx_packets | Count/Second |\n| pod_number_of_container_restarts | Count | \n| pod_number_of_containers | Count | \n| pod_number_of_running_containers | Count | \n\n| Resource Attribute |\n|----------------------|\n| AutoScalingGroupName |\n| ClusterName |\n| InstanceId |\n| InstanceType |\n| K8sPodName |\n| Namespace |\n| NodeName |\n| PodId |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| kubernete |\n| pod_status |\n\n\u003cbr/\u003e\u003cbr/\u003e \n\n### Pod Network\n| Metric | Unit |\n|------------------------------------|--------------|\n| pod_interface_network_rx_bytes | Bytes/Second |\n| pod_interface_network_rx_dropped | Count/Second |\n| pod_interface_network_rx_errors | Count/Second |\n| pod_interface_network_rx_packets | Count/Second |\n| pod_interface_network_total_bytes | Bytes/Second |\n| pod_interface_network_tx_bytes | Bytes/Second |\n| pod_interface_network_tx_dropped | Count/Second |\n| pod_interface_network_tx_errors | Count/Second |\n| pod_interface_network_tx_packets | Count/Second |\n\n\u003cbr/\u003e\u003cbr/\u003e \n| Resource Attribute |\n|----------------------|\n| AutoScalingGroupName |\n| ClusterName |\n| InstanceId |\n| InstanceType |\n| K8sPodName 
|\n| Namespace |\n| NodeName |\n| PodId |\n| Timestamp |\n| Type |\n| Version |\n| interface |\n| Sources |\n| kubernete |\n| pod_status |\n\u003cbr/\u003e\u003cbr/\u003e \n\u003cbr/\u003e\u003cbr/\u003e \n\n\n### Container\n| Metric | Unit |\n|-----------------------------------------|---------------|\n| container_cpu_limit | Millicore |\n| container_cpu_request | Millicore |\n| container_cpu_usage_system | Millicore |\n| container_cpu_usage_total | Millicore |\n| container_cpu_usage_user | Millicore |\n| container_cpu_utilization | Percent |\n| container_memory_cache | Bytes |\n| container_memory_failcnt | Count |\n| container_memory_hierarchical_pgfault | Count/Second |\n| container_memory_hierarchical_pgmajfault| Count/Second |\n| container_memory_limit | Bytes |\n| container_memory_mapped_file | Bytes |\n| container_memory_max_usage | Bytes |\n| container_memory_pgfault | Count/Second |\n| container_memory_pgmajfault | Count/Second |\n| container_memory_request | Bytes |\n| container_memory_rss | Bytes |\n| container_memory_swap | Bytes |\n| container_memory_usage | Bytes |\n| container_memory_utilization | Percent |\n| container_memory_working_set | Bytes |\n| number_of_container_restarts | Count |\n\n\u003cbr/\u003e\u003cbr/\u003e \n\n| Resource Attribute |\n|-----------------------------------|\n| AutoScalingGroupName |\n| ClusterName |\n| ContainerId |\n| ContainerName |\n| InstanceId |\n| InstanceType |\n| K8sPodName |\n| Namespace |\n| NodeName |\n| PodId |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| kubernetes |\n| container_status |\n| container_status_reason |\n| container_last_termination_reason | \n\nThe attribute `container_status_reason` is present only when `container_status` is in \"Waiting\" or \"Terminated\" State. The attribute `container_last_termination_reason` is present only when `container_status` is in \"Terminated\" State.\n\nThis is a sample configuration for AWS Container Insights using the `awscontainerinsightreceiver` and `awsemfexporter` for an ECS cluster to collect the instance level metrics:\n```\nreceivers:\n awscontainerinsightreceiver:\n collection_interval: 10s\n container_orchestrator: ecs\n\nprocessors:\n batch/metrics:\n timeout: 60s\n\nexporters:\n awsemf:\n namespace: ContainerInsightsEC2Instance\n log_group_name: '/aws/ecs/containerinsights/{ClusterName}/performance'\n log_stream_name: 'instanceTelemetry/{ContainerInstanceId}'\n resource_to_telemetry_conversion:\n enabled: true\n dimension_rollup_option: NoDimensionRollup\n parse_json_encoded_attr_values: [Sources]\n metric_declarations:\n # instance metrics\n - dimensions: [ [ ContainerInstanceId, InstanceId, ClusterName] ]\n metric_name_selectors:\n - instance_cpu_utilization\n - instance_memory_utilization\n - instance_network_total_bytes\n - instance_cpu_reserved_capacity\n - instance_memory_reserved_capacity\n - instance_number_of_running_tasks\n - instance_filesystem_utilization\n - dimensions: [ [ClusterName] ]\n metric_name_selectors:\n - instance_cpu_utilization\n - instance_memory_utilization\n - instance_network_total_bytes\n - instance_cpu_reserved_capacity\n - instance_memory_reserved_capacity\n - instance_number_of_running_tasks\n - instance_cpu_usage_total\n - instance_cpu_limit\n - instance_memory_working_set\n - instance_memory_limit\n logging:\n loglevel: debug\nservice:\n pipelines:\n metrics:\n receivers: [awscontainerinsightreceiver]\n processors: [batch/metrics]\n exporters: [awsemf,logging]\n```\nTo deploy to an ECS cluster check this 
[doc](https://aws-otel.github.io/docs/setup/ecs#3-setup-the-aws-otel-collector-for-ecs-ec2-instance-metrics) for details\n\n## Available Metrics and Resource Attributes\n### Instance\n| Metric | Unit |\n|-----------------------------------------|---------------|\n| instance_cpu_limit | Millicore |\n| instance_cpu_reserved_capacity | Percent |\n| instance_cpu_usage_system | Millicore |\n| instance_cpu_usage_total | Millicore |\n| instance_cpu_usage_user | Millicore |\n| instance_cpu_utilization | Percent |\n| instance_memory_cache | Bytes |\n| instance_memory_failcnt | Count |\n| instance_memory_hierarchical_pgfault | Count/Second |\n| instance_memory_hierarchical_pgmajfault | Count/Second |\n| instance_memory_limit | Bytes |\n| instance_memory_mapped_file | Bytes |\n| instance_memory_max_usage | Bytes |\n| instance_memory_pgfault | Count/Second |\n| instance_memory_pgmajfault | Count/Second |\n| instance_memory_reserved_capacity | Percent |\n| instance_memory_rss | Bytes |\n| instance_memory_swap | Bytes |\n| instance_memory_usage | Bytes |\n| instance_memory_utilization | Percent |\n| instance_memory_working_set | Bytes |\n| instance_network_rx_bytes | Bytes/Second |\n| instance_network_rx_dropped | Count/Second |\n| instance_network_rx_errors | Count/Second |\n| instance_network_rx_packets | Count/Second |\n| instance_network_total_bytes | Bytes/Second |\n| instance_network_tx_bytes | Bytes/Second |\n| instance_network_tx_dropped | Count/Second |\n| instance_network_tx_errors | Count/Second |\n| instance_network_tx_packets | Count/Second |\n| instance_number_of_running_tasks | Count |\n\u003cbr/\u003e\u003cbr/\u003e\n\n| Resource Attribute |\n|----------------------|\n| ClusterName |\n| InstanceType |\n| AutoScalingGroupName |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| ContainerInstanceId |\n| InstanceId |\n\n\u003cbr/\u003e\u003cbr/\u003e\n\u003cbr/\u003e\u003cbr/\u003e\n\n### Instance Disk IO\n| Metric | Unit |\n|----------------------------------------|---------------|\n| instance_diskio_io_serviced_async | Count/Second |\n| instance_diskio_io_serviced_read | Count/Second |\n| instance_diskio_io_serviced_sync | Count/Second |\n| instance_diskio_io_serviced_total | Count/Second |\n| instance_diskio_io_serviced_write | Count/Second |\n| instance_diskio_io_service_bytes_async | Bytes/Second |\n| instance_diskio_io_service_bytes_read | Bytes/Second |\n| instance_diskio_io_service_bytes_sync | Bytes/Second |\n| instance_diskio_io_service_bytes_total | Bytes/Second |\n| instance_diskio_io_service_bytes_write | Bytes/Second |\n\n\u003cbr/\u003e\u003cbr/\u003e\n\n| Resource Attribute |\n|----------------------|\n| ClusterName |\n| InstanceType |\n| AutoScalingGroupName |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| ContainerInstanceId |\n| InstanceId |\n| EBSVolumeId |\n\n\u003cbr/\u003e\u003cbr/\u003e\n\u003cbr/\u003e\u003cbr/\u003e\n\n### Instance Filesystem\n| Metric | Unit |\n|---------------------------------|---------|\n| instance_filesystem_available | Bytes |\n| instance_filesystem_capacity | Bytes |\n| instance_filesystem_inodes | Count |\n| instance_filesystem_inodes_free | Count |\n| instance_filesystem_usage | Bytes |\n| instance_filesystem_utilization | Percent |\n\n\u003cbr/\u003e\u003cbr/\u003e\n| Resource Attribute |\n|----------------------|\n| ClusterName |\n| InstanceType |\n| AutoScalingGroupName |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| ContainerInstanceId |\n| InstanceId |\n| EBSVolumeId 
|\n\u003cbr/\u003e\u003cbr/\u003e\n\u003cbr/\u003e\u003cbr/\u003e\n\n### Instance Network\n| Metric | Unit |\n|----------------------------------------|--------------|\n| instance_interface_network_rx_bytes | Bytes/Second |\n| instance_interface_network_rx_dropped | Count/Second |\n| instance_interface_network_rx_errors | Count/Second |\n| instance_interface_network_rx_packets | Count/Second |\n| instance_interface_network_total_bytes | Bytes/Second |\n| instance_interface_network_tx_bytes | Bytes/Second |\n| instance_interface_network_tx_dropped | Count/Second |\n| instance_interface_network_tx_errors | Count/Second |\n| instance_interface_network_tx_packets | Count/Second |\n\n\u003cbr/\u003e\u003cbr/\u003e\n| Resource Attribute |\n|----------------------|\n| ClusterName |\n| InstanceType |\n| AutoScalingGroupName |\n| Timestamp |\n| Type |\n| Version |\n| Sources |\n| ContainerInstanceId |\n| InstanceId |\n| EBSVolumeId |\n\u003cbr/\u003e\u003cbr/\u003e\n\u003cbr/\u003e\u003cbr/\u003e","properties":{"add_full_pod_name_metric_label":{"description":"The \"FullPodName\" attribute is the pod name including suffix\nIf false FullPodName label is not added\nThe default value is false","title":"add_full_pod_name_metric_label","type":"boolean"},"add_service_as_attribute":{"description":"Whether to add the associated service name as attribute. The default is true","title":"add_service_as_attribute","type":"boolean"},"collection_interval":{"description":"CollectionInterval is the interval at which metrics should be collected. The default is 60 second.","title":"collection_interval","type":"string"},"container_orchestrator":{"description":"ContainerOrchestrator is the type of container orchestration service, e.g. eks or ecs. The default is eks.","title":"container_orchestrator","type":"string"},"prefer_full_pod_name":{"description":"The \"PodName\" attribute is set based on the name of the relevant controllers like Daemonset, Job, ReplicaSet, ReplicationController, ...\nIf it can not be set that way and PrefFullPodName is true, the \"PodName\" attribute is set to the pod's own name.\nThe default value is false","title":"prefer_full_pod_name","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awsecscontainermetricsreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for aws ecs container metrics receiver.","markdownDescription":"# AWS ECS Container Metrics Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [aws], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Overview\n\nAWS ECS Container Metrics Receiver (`awsecscontainermetrics`) reads task metadata and [docker stats](https://docs.docker.com/engine/api/v1.30/#operation/ContainerStats) from [Amazon ECS Task Metadata Endpoint](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html), and generates resource usage metrics (such as CPU, memory, network, and disk) from them. 
To get the full list of metrics, see the [Available Metrics](#available-metrics) section below.\n\nThis receiver works only for [ECS Task Metadata Endpoint V4](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v4.html). Amazon ECS tasks on Fargate that use platform version 1.4.0 or later and Amazon ECS tasks on Amazon EC2 that are running at least version 1.39.0 of the Amazon ECS container agent can utilize this receiver. For more information, see [Amazon ECS Container Agent Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-versions.html).\n\n\n## Configuration\n\nExample:\n\n```yaml\nreceivers:\n awsecscontainermetrics:\n collection_interval: 20s\n```\n\n#### collection_interval:\n\nThis receiver collects task metadata and container stats at a fixed interval and emits metrics to the next consumer of OpenTelemetry pipeline. `collection_interval` will determine the frequency at which metrics are collected and emitted by this receiver.\n\ndefault: `20s`\n\n\n## Enabling the AWS ECS Container Metrics Receiver\n\nTo enable the awsecscontainermetrics receiver, add the name under receiver section in the OpenTelemetry config file. By default, the receiver scrapes the ECS task metadata endpoint every 20s and collects all metrics (For the full list of metrics, see [Available Metrics](#available-metrics)).\n\nThe following configuration collects AWS ECS resource usage metrics by using `awsecscontainermetrics` receiver and sends them to CloudWatch using `awsemf` exporter. Check out [SETUP](https://aws-otel.github.io/docs/setup/ecs) section for configuring AWS Distro for OpenTelemetry Collector in Amazon Elastic Container Service.\n\n```yaml\nreceivers:\n awsecscontainermetrics:\nexporters:\n awsemf:\n namespace: 'ECS/ContainerMetrics/OpenTelemetry'\n log_group_name: '/ecs/containermetrics/opentelemetry'\n\nservice:\n pipelines:\n metrics:\n receivers: [awsecscontainermetrics]\n exporters: [awsemf]\n```\n\n## Set Metrics Collection Interval\n\nCustomers can configure `collection_interval` under `awsecscontainermetrics` receiver to scrape and gather metrics at a specific interval. The following example configuration will collect metrics every 40 seconds.\n\n```yaml\nreceivers:\n awsecscontainermetrics:\n collection_interval: 40s\nexporters:\n awsemf:\n namespace: 'ECS/ContainerMetrics/OpenTelemetry'\n log_group_name: '/ecs/containermetrics/opentelemetry'\n\nservice:\n pipelines:\n metrics:\n receivers: [awsecscontainermetrics]\n exporters: [awsemf]\n```\n\n## Collect specific metrics and update metric names\n\nThe previous configurations collect all the metrics and sends them to Amazon CloudWatch using default names. 
Customers can use `filter` and `metrictransform` processors to send specific metrics and rename them respectively.\n\nThe following configuration example collects only the `ecs.task.memory.utilized` metric and renames it to `MemoryUtilized` before sending to CloudWatch.\n\n```yaml\nreceivers:\n awsecscontainermetrics:\nexporters:\n awsemf:\n namespace: 'ECS/ContainerMetrics/OpenTelemetry'\n log_group_name: '/ecs/containermetrics/opentelemetry'\nprocessors:\n filter:\n metrics:\n include:\n match_type: strict\n metric_names:\n - ecs.task.memory.utilized\n\n metricstransform:\n transforms:\n - include: ecs.task.memory.utilized\n action: update\n new_name: MemoryUtilized\n\nservice:\n pipelines:\n metrics:\n receivers: [awsecscontainermetrics]\n processors: [filter, metricstransform]\n exporters: [awsemf]\n```\n\n## Available Metrics\nFollowing is the full list of metrics emitted by this receiver.\n\nTask Level Metrics | Container Level Metrics | Unit \n------------ | ------------- | --------------------\necs.task.memory.usage | container.memory.usage | Bytes\necs.task.memory.usage.max | container.memory.usage.max | Bytes\necs.task.memory.usage.limit | container.memory.usage.limit | Bytes\necs.task.memory.reserved | container.memory.reserved | Megabytes\necs.task.memory.utilized | container.memory.utilized | Megabytes\necs.task.cpu.usage.total | container.cpu.usage.total | Nanoseconds\necs.task.cpu.usage.kernelmode | container.cpu.usage.kernelmode | Nanoseconds\necs.task.cpu.usage.usermode | container.cpu.usage.usermode | Nanoseconds\necs.task.cpu.usage.system | container.cpu.usage.system | Nanoseconds\necs.task.cpu.usage.vcpu | container.cpu.usage.vcpu | vCPU\necs.task.cpu.cores | container.cpu.cores | Count\necs.task.cpu.onlines | container.cpu.onlines | Count\necs.task.cpu.reserved | container.cpu.reserved | vCPU\necs.task.cpu.utilized | container.cpu.utilized | Percent\necs.task.network.rate.rx\t| container.network.rate.rx\t| Bytes/Second\necs.task.network.rate.tx\t| container.network.rate.tx\t| Bytes/Second\necs.task.network.io.usage.rx_bytes\t| container.network.io.usage.rx_bytes\t| Bytes\necs.task.network.io.usage.rx_packets\t| container.network.io.usage.rx_packets\t| Count\necs.task.network.io.usage.rx_errors |\tcontainer.network.io.usage.rx_errors\t| Count\necs.task.network.io.usage.rx_dropped |\tcontainer.network.io.usage.rx_dropped\t| Count\necs.task.network.io.usage.tx_bytes | container.network.io.usage.tx_bytes\t| Bytes\necs.task.network.io.usage.tx_packets\t| container.network.io.usage.tx_packets\t| Count\necs.task.network.io.usage.tx_errors\t| container.network.io.usage.tx_errors\t| Count\necs.task.network.io.usage.tx_dropped\t| container.network.io.usage.tx_dropped\t| Count\necs.task.storage.read_bytes | container.storage.read_bytes| Bytes\necs.task.storage.write_bytes | container.storage.write_bytes | Bytes\n\n\n## Resource Attributes and Metrics Labels\nMetrics emitted by this receiver comes with a set of resource attributes. These resource attributes can be converted to metrics labels using appropriate processors/exporters (See `Full Configuration Examples` section below). Finally, these metrics labels can be set as metrics dimensions while exporting to desired destinations. Check the following table to see available resource attributes for Task and Container level metrics. 
Container level metrics include several resource attributes that are not present at the task level.\n\nResource Attributes for Task Level Metrics | Resource Attributes for Container Level Metrics\n-------------------- | -----------------------------\naws.ecs.cluster.name | aws.ecs.cluster.name\naws.ecs.task.family | aws.ecs.task.family\naws.ecs.task.arn | aws.ecs.task.arn\naws.ecs.task.id | aws.ecs.task.id\naws.ecs.task.revision | aws.ecs.task.revision\naws.ecs.service.name | aws.ecs.service.name\ncloud.availability_zone | cloud.availability_zone\ncloud.account.id | cloud.account.id\ncloud.region | cloud.region\naws.ecs.task.pull_started_at | aws.ecs.container.started_at\naws.ecs.task.pull_stopped_at | aws.ecs.container.finished_at\naws.ecs.task.known_status | aws.ecs.container.know_status\naws.ecs.launch_type | aws.ecs.launch_type\n\u0026nbsp; | aws.ecs.container.created_at\n\u0026nbsp; | container.name\n\u0026nbsp; | container.id\n\u0026nbsp; | aws.ecs.docker.name \n\u0026nbsp; | container.image.tag\n\u0026nbsp; | aws.ecs.container.image.id\n\u0026nbsp; | aws.ecs.container.exit_code\n\n## Full Configuration Examples\nThis receiver emits 52 unique metrics. Customers may not want to send all of them to their destinations. The following sections show full configuration files for filtering and transforming existing metrics with different processors/exporters. \n\n### 1. Full configuration for task level metrics\nThe following example shows a full configuration to get the most useful task level metrics. It uses the `awsecscontainermetrics` receiver to collect all the resource usage metrics from the ECS task metadata endpoint. It applies the `filter` processor to select only 8 task-level metrics and updates their names using the `metricstransform` processor. It also renames the resource attributes using the `resource` processor; the renamed attributes are then used as metric dimensions by the Amazon CloudWatch `awsemf` exporter. Finally, it sends the metrics to CloudWatch using the `awsemf` exporter, writing to the `/aws/ecs/containerinsights/{ClusterName}/performance` log group, where the `{ClusterName}` placeholder will be replaced with the actual cluster name. Check the [AWS EMF Exporter](https://aws-otel.github.io/docs/getting-started/cloudwatch-metrics) documentation to see and explore the metrics in Amazon CloudWatch.\n\n**Note:** The AWS OpenTelemetry Collector has a [default configuration](https://github.com/aws-observability/aws-otel-collector/blob/main/config/ecs/container-insights/otel-task-metrics-config.yaml) baked into it for the Container Insights experience, which is similar to this one. 
Follow our [setup](https://aws-otel.github.io/docs/setup/ecs) doc to check how to use that default config.\n\n```yaml\nreceivers:\n awsecscontainermetrics: # collect 52 metrics\n\nprocessors:\n filter: # filter metrics\n metrics:\n include:\n match_type: strict\n metric_names: # select only 8 task level metrics out of 52\n - ecs.task.memory.reserved\n - ecs.task.memory.utilized\n - ecs.task.cpu.reserved\n - ecs.task.cpu.utilized\n - ecs.task.network.rate.rx\n - ecs.task.network.rate.tx\n - ecs.task.storage.read_bytes\n - ecs.task.storage.write_bytes\n metricstransform: # update metric names\n transforms:\n - include: ecs.task.memory.utilized\n action: update\n new_name: MemoryUtilized\n - include: ecs.task.memory.reserved\n action: update\n new_name: MemoryReserved\n - include: ecs.task.cpu.utilized\n action: update\n new_name: CpuUtilized\n - include: ecs.task.cpu.reserved\n action: update\n new_name: CpuReserved\n - include: ecs.task.network.rate.rx\n action: update\n new_name: NetworkRxBytes\n - include: ecs.task.network.rate.tx\n action: update\n new_name: NetworkTxBytes\n - include: ecs.task.storage.read_bytes\n action: update\n new_name: StorageReadBytes\n - include: ecs.task.storage.write_bytes\n action: update\n new_name: StorageWriteBytes\n resource:\n attributes: # rename resource attributes which will be used as dimensions\n - key: ClusterName\n from_attribute: aws.ecs.cluster.name\n action: insert\n - key: aws.ecs.cluster.name\n action: delete\n - key: ServiceName\n from_attribute: aws.ecs.service.name\n action: insert\n - key: aws.ecs.service.name\n action: delete\n - key: TaskId\n from_attribute: aws.ecs.task.id\n action: insert\n - key: aws.ecs.task.id\n action: delete\n - key: TaskDefinitionFamily\n from_attribute: aws.ecs.task.family\n action: insert\n - key: aws.ecs.task.family\n action: delete\nexporters:\n awsemf:\n namespace: ECS/ContainerInsights\n log_group_name: '/aws/ecs/containerinsights/{ClusterName}/performance'\n log_stream_name: '{TaskId}' # TaskId placeholder will be replaced with actual value\n resource_to_telemetry_conversion:\n enabled: true\n dimension_rollup_option: NoDimensionRollup\n metric_declarations:\n dimensions: [ [ ClusterName ], [ ClusterName, TaskDefinitionFamily ] ]\n metric_name_selectors: [ . ]\nservice:\n pipelines:\n metrics:\n receivers: [awsecscontainermetrics ]\n processors: [filter, metricstransform, resource]\n exporters: [ awsemf ]\n```\n\n\n### 2. Full configuration for task- and container-level metrics\n\nThe following example shows a full configuration to get most useful task- and container-level metrics. It uses `awsecscontainermetrics` receiver to collect all the resource usage metrics from ECS task metadata endpoint. It applies `filter` processor to select only 8 task- and container-level metrics and update metric names using `metricstransform` processor. It also renames the resource attributes using `resource` processor which will be used as metric dimensions in the Amazon CloudWatch `awsemf` exporter. Finally, it sends the metrics to CloudWatch using `awsemf` exporter under the /`aws/ecs/containerinsights/{ClusterName}/performance` namespace where the `{ClusterName}` placeholder will be replaced with actual cluster name. 
Check the [AWS EMF Exporter](https://aws-otel.github.io/docs/getting-started/cloudwatch-metrics) documentation to see and explore the metrics in Amazon CloudWatch.\n\n```yaml\nreceivers:\n awsecscontainermetrics:\n\nprocessors:\n filter:\n metrics:\n include:\n match_type: regexp\n metric_names:\n - .*memory.reserved\n - .*memory.utilized\n - .*cpu.reserved\n - .*cpu.utilized\n - .*network.rate.rx\n - .*network.rate.tx\n - .*storage.read_bytes\n - .*storage.write_bytes\n metricstransform:\n transforms:\n - include: ecs.task.memory.utilized\n action: update\n new_name: MemoryUtilized\n - include: ecs.task.memory.reserved\n action: update\n new_name: MemoryReserved\n - include: ecs.task.cpu.utilized\n action: update\n new_name: CpuUtilized\n - include: ecs.task.cpu.reserved\n action: update\n new_name: CpuReserved\n - include: ecs.task.network.rate.rx\n action: update\n new_name: NetworkRxBytes\n - include: ecs.task.network.rate.tx\n action: update\n new_name: NetworkTxBytes\n - include: ecs.task.storage.read_bytes\n action: update\n new_name: StorageReadBytes\n - include: ecs.task.storage.write_bytes\n action: update\n new_name: StorageWriteBytes\n resource:\n attributes:\n - key: ClusterName\n from_attribute: aws.ecs.cluster.name\n action: insert\n - key: aws.ecs.cluster.name\n action: delete\n - key: ServiceName\n from_attribute: aws.ecs.service.name\n action: insert\n - key: aws.ecs.service.name\n action: delete\n - key: TaskId\n from_attribute: aws.ecs.task.id\n action: insert\n - key: aws.ecs.task.id\n action: delete\n - key: TaskDefinitionFamily\n from_attribute: aws.ecs.task.family\n action: insert\n - key: aws.ecs.task.family\n action: delete\n - key: ContainerName\n from_attribute: container.name\n action: insert\n - key: container.name\n action: delete \nexporters:\n awsemf:\n namespace: ECS/ContainerInsights\n log_group_name: '/aws/ecs/containerinsights/{ClusterName}/performance'\n log_stream_name: '{TaskId}'\n resource_to_telemetry_conversion:\n enabled: true\n dimension_rollup_option: NoDimensionRollup\n metric_declarations:\n - dimensions: [[ClusterName], [ClusterName, TaskDefinitionFamily]]\n metric_name_selectors: \n - MemoryUtilized \n - MemoryReserved \n - CpuUtilized\n - CpuReserved\n - NetworkRxBytes\n - NetworkTxBytes\n - StorageReadBytes\n - StorageWriteBytes\n - dimensions: [[ClusterName], [ClusterName, TaskDefinitionFamily, ContainerName]]\n metric_name_selectors: [container.*]\n \nservice:\n pipelines:\n metrics:\n receivers: [awsecscontainermetrics]\n processors: [filter, metricstransform, resource]\n exporters: [awsemf]\n```\n\n## Reference\n1. [Setup OpenTelemetry Collector on Amazon ECS](https://aws-otel.github.io/docs/setup/ecs)\n2. 
[Getting Started with ECS Container Metrics Receiver in the OpenTelemetry Collector](https://aws-otel.github.io/docs/components/ecs-metrics-receiver)","properties":{"collection_interval":{"description":"CollectionInterval is the interval at which metrics should be collected","title":"collection_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awsfirehosereceiver.Config":{"additionalProperties":false,"markdownDescription":"# AWS Kinesis Data Firehose Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nReceiver for ingesting AWS Kinesis Data Firehose delivery stream messages and parsing the records received based on the configured record type.\n\n## Configuration\n\nExample:\n\n```yaml\nreceivers:\n awsfirehose:\n endpoint: 0.0.0.0:4433\n record_type: cwmetrics\n access_key: \"some_access_key\"\n tls:\n cert_file: server.crt\n key_file: server.key\n```\nThe configuration includes the Opentelemetry collector's server [confighttp](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp#server-configuration),\nwhich allows for a variety of settings. Only the most relevant ones will be discussed here, but all are available.\nThe AWS Kinesis Data Firehose Delivery Streams currently only support HTTPS endpoints using port 443. This can be potentially circumvented\nusing a Load Balancer.\n\n### endpoint:\nThe address:port to bind the listener to.\n\ndefault: `0.0.0.0:4433`\n\n### tls:\nSee [documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md#server-configuration) for more details.\n\nA `cert_file` and `key_file` are required.\n\n### record_type:\nThe type of record being received from the delivery stream. Each unmarshaler handles a specific type, so the field allows the receiver to use the correct one.\n\ndefault: `cwmetrics`\n\nSee the [Record Types](#record-types) section for all available options.\n\n### access_key (Optional):\nThe access key to be checked on each request received. This can be set when creating or updating the delivery stream.\nSee [documentation](https://docs.aws.amazon.com/firehose/latest/dev/create-destination.html#create-destination-http) for details.\n\n## Record Types\n\n### cwmetrics\nThe record type for the CloudWatch metric stream. 
Expects the format for the records to be JSON.\nSee [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html) for details.","properties":{"access_key":{"description":"AccessKey is checked against the one received with each request.\nThis can be set when creating or updating the Firehose delivery\nstream.","title":"access_key","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"record_type":{"description":"RecordType is the key used to determine which unmarshaler to use\nwhen receiving the requests.","title":"record_type","type":"string"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awsxrayreceiver.Config":{"additionalProperties":false,"description":"Config defines the configurations for an AWS X-Ray receiver.","markdownDescription":"# AWS X-Ray Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [aws], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n## Overview\nThe AWS X-Ray receiver accepts segments (i.e. spans) in the [X-Ray Segment format](https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html).\nThis enables the collector to receive spans emitted by the existing X-Ray SDK. 
[Centralized sampling](https://github.com/aws/aws-xray-daemon/blob/master/CHANGELOG.md#300-2018-08-28) is also supported via a local TCP port.\n\nThe requests sent to AWS are authenticated using the mechanism documented [here](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).\n\n## Configuration\n\nExample:\n\n```yaml\nreceivers:\n awsxray:\n endpoint: 0.0.0.0:2000\n transport: udp\n proxy_server:\n endpoint: 0.0.0.0:2000\n proxy_address: \"\"\n tls:\n insecure: false\n server_name_override: \"\"\n region: \"\"\n role_arn: \"\"\n aws_endpoint: \"\"\n local_mode: false\n```\n\nThe default configurations below are based on the [default configurations](https://github.com/aws/aws-xray-daemon/blob/master/pkg/cfg/cfg.go#L99) of the existing X-Ray Daemon.\n\n### endpoint (Optional)\nThe UDP address and port on which this receiver listens for X-Ray segment documents emitted by the X-Ray SDK.\n\nDefault: `0.0.0.0:2000`\n\n### transport (Optional)\nThis should always be \"udp\" as X-Ray SDKs only send segments using UDP.\n\nDefault: `udp`\n\n### proxy_server (Optional)\nDefines configurations related to the local TCP proxy server.\n\n### endpoint (Optional)\nThe TCP address and port on which this receiver listens for calls from the X-Ray SDK and relays them to the AWS X-Ray backend to get sampling rules and report sampling statistics.\n\nDefault: `0.0.0.0:2000`\n\n### proxy_address (Optional)\nDefines the proxy address that the local TCP server forwards HTTP requests to AWS X-Ray backend through. If left unconfigured, requests will be sent directly.\n\n### insecure (Optional)\nEnables or disables TLS certificate verification when the local TCP server forwards HTTP requests to the AWS X-Ray backend. This sets the `InsecureSkipVerify` in the [TLSConfig](https://godoc.org/crypto/tls#Config). When setting to true, TLS is susceptible to man-in-the-middle attacks so it should be used only for testing.\n\nDefault: `false`\n\n### server_name_override (Optional)\nThis sets the ``ServerName` in the [TLSConfig](https://godoc.org/crypto/tls#Config).\n\n### region (Optional)\nThe AWS region the local TCP server forwards requests to. When missing, we will try to retrieve this value through environment variables or optionally ECS/EC2 metadata endpoint (depends on `local_mode` below).\n\n### role_arn (Optional)\nThe IAM role used by the local TCP server when communicating with the AWS X-Ray service. If non-empty, the receiver will attempt to call STS to retrieve temporary credentials, otherwise the standard AWS credential [lookup](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials) will be performed.\n\n### aws_endpoint (Optional)\nThe X-Ray service endpoint which the local TCP server forwards requests to.\n\n### local_mode (Optional)\nDetermines whether the ECS/EC2 instance metadata endpoint will be called to fetch the AWS region to send requests to. Set to `true` to skip metadata check.\n\nDefault: `false`","properties":{"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". 
The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"proxy_server":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.aws.proxy.Config","description":"ProxyServer defines configurations related to the local TCP proxy server.","title":"proxy_server"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.Config":{"additionalProperties":false,"properties":{"connection_string":{"description":"Azure Blob Storage connection key,\nwhich can be found in the Azure Blob Storage resource on the Azure Portal. (no default)","title":"connection_string","type":"string"},"event_hub":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.EventHubConfig","description":"Configurations of Azure Event Hub triggering on the `Blob Create` event","title":"event_hub"},"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.LogsConfig","description":"Logs related configurations","title":"logs"},"traces":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.TracesConfig","description":"Traces related configurations","title":"traces"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.EventHubConfig":{"additionalProperties":false,"properties":{"endpoint":{"description":"Azure Event Hub endpoint triggering on the `Blob Create` event","title":"endpoint","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.LogsConfig":{"additionalProperties":false,"properties":{"container_name":{"description":"Name of the blob container with the logs (default = \"logs\")","title":"container_name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.TracesConfig":{"additionalProperties":false,"properties":{"container_name":{"description":"Name of the blob container with the traces (default = \"traces\")","title":"container_name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureeventhubreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Azure Event Hub Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics, logs |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Overview\nAzure resources and services can be\n[configured](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings)\nto send their logs to an Azure Event Hub. 
The Azure Event Hub receiver pulls logs from an Azure\nEvent Hub, transforms them, and pushes them through the collector pipeline.\n\n## Configuration\n\n### connection (Required)\nA string describing the connection to an Azure event hub.\n\n### partition (Optional)\nThe partition to watch. If empty, it will watch explicitly all partitions.\n\nDefault: \"\"\n\n### offset (Optional)\nThe offset at which to start watching the event hub. If empty, it starts with the latest offset.\n\nDefault: \"\"\n\n### format (Optional)\nDetermines how to transform the Event Hub messages into OpenTelemetry logs. See the \"Format\"\nsection below for details.\n\nDefault: \"azure\"\n\n### Example Configuration\n\n```yaml\nreceivers:\n azureeventhub:\n connection: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName\n partition: foo\n offset: \"1234-5566\"\n format: \"azure\"\n```\n\nThis component can persist its state using the [storage extension].\n\n## Format\n\n### raw\n\nThe \"raw\" format maps the AMQP properties and data into the\nattributes and body of an OpenTelemetry LogRecord, respectively.\nThe body is represented as a raw byte array.\n\nThis format is not supported for Metrics.\n\n### azure\n\nThe \"azure\" format extracts the Azure log records from the AMQP\nmessage data, parses them, and maps the fields to OpenTelemetry\nattributes. The table below summarizes the mapping between the \n[Azure common log format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema)\nand the OpenTelemetry attributes.\n\n\n| Azure | OpenTelemetry | \n|----------------------------------|----------------------------------------|\n| callerIpAddress (optional) | net.sock.peer.addr (attribute) | \n| correlationId (optional) | azure.correlation.id (attribute) | \n| category (optional) | azure.category (attribute) | \n| durationMs (optional) | azure.duration (attribute) | \n| Level (optional) | severity_number, severity_text (field) | \n| location (optional) | cloud.region (attribute) | \n| — | cloud.provider (attribute) | \n| operationName (required) | azure.operation.name (attribute) |\n| operationVersion (optional) | azure.operation.version (attribute) | \n| properties (optional) | azure.properties (attribute, nested) | \n| resourceId (required) | azure.resource.id (resource attribute) | \n| resultDescription (optional) | azure.result.description (attribute) | \n| resultSignature (optional) | azure.result.signature (attribute) | \n| resultType (optional) | azure.result.type (attribute) | \n| tenantId (required, tenant logs) | azure.tenant.id (attribute) | \n| time (required) | time_unix_nano (field) | \n| identity (optional) | azure.identity (attribute, nested) |\n\nNote: JSON does not distinguish between fixed and floating point numbers. 
All\nJSON numbers are encoded as doubles.\n\nFor Metrics the Azure Metric Records are an array\nof \"records\" with the following fields.\n\n| Azure |\n|------------|\n| time |\n| resourceId |\n| metricName |\n| timeGrain |\n| total |\n| count |\n| minimum |\n| maximum |\n| average |\n\nFrom this data a Metric of type Summary is created\nwith a single Data Point that represents the value\nfrom the \"total\" field.\n\n[storage extension]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage","properties":{"connection":{"title":"connection","type":"string"},"format":{"title":"format","type":"string"},"offset":{"title":"offset","type":"string"},"partition":{"title":"partition","type":"string"},"storage":{"title":"storage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# Azure Monitor Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver scrapes Azure Monitor API for resources metrics.\n\n## Configuration\n\nThe following settings are required:\n- `subscription_id`\n- `tenant_id`\n- `client_id`\n- `client_secret`\n\nThe following settings are optional:\n- `resource_groups` (default = none): Filter metrics for specific resource groups, not setting a value will scrape metrics for all resources in the subscription.\n- `services` (default = none): Filter metrics for specific services, not setting a value will scrape metrics for all services integrated with Azure Monitor.\n- `cache_resources` (default = 86400): List of resources will be cached for the provided amount of time in seconds.\n- `cache_resources_definitions` (default = 86400): List of metrics definitions will be cached for the provided amount of time in seconds.\n- `maximum_number_of_metrics_in_a_call` (default = 20): Maximum number of metrics to fetch in per API call, current limit in Azure is 20 (as of 03/27/2023).\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\n### Example Configuration\n\n```yaml\nreceivers:\n azuremonitor:\n subscription_id: \"${subscription_id}\"\n tenant_id: \"${tenant_id}\"\n client_id: \"${client_id}\"\n client_secret: \"${env:CLIENT_SECRET}\"\n resource_groups:\n - \"${resource_group1}\"\n - \"${resource_group2}\"\n services:\n - \"${service1}\"\n - \"${service2}\"\n collection_interval: 60s\n initial_delay: 1s\n```\n\n## Metrics\n\nDetails about the metrics scraped by this receiver can be found in [Supported metrics with Azure Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported). 
This receiver adds the prefix \"azure_\" to all scraped metrics.","properties":{"append_tags_as_attributes":{"title":"append_tags_as_attributes","type":"boolean"},"cache_resources":{"title":"cache_resources","type":"number"},"cache_resources_definitions":{"title":"cache_resources_definitions","type":"number"},"client_id":{"title":"client_id","type":"string"},"client_secret":{"title":"client_secret","type":"string"},"collection_interval":{"title":"collection_interval","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"maximum_number_of_metrics_in_a_call":{"title":"maximum_number_of_metrics_in_a_call","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.internal.metadata.ResourceAttributesSettings","title":"resource_attributes"},"resource_groups":{"items":{"type":"string"},"title":"resource_groups","type":"array"},"services":{"items":{"type":"string"},"title":"services","type":"array"},"subscription_id":{"title":"subscription_id","type":"string"},"tenant_id":{"title":"tenant_id","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.internal.metadata.ResourceAttributeSettings":{"additionalProperties":false,"description":"ResourceAttributeSettings provides common settings for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.internal.metadata.ResourceAttributesSettings":{"additionalProperties":false,"description":"ResourceAttributesSettings provides settings for azuremonitorreceiver metrics.","properties":{"azuremonitor.subscription_id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.internal.metadata.ResourceAttributeSettings","title":"azuremonitor.subscription_id"},"azuremonitor.tenant_id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.internal.metadata.ResourceAttributeSettings","title":"azuremonitor.tenant_id"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# F5 Big-IP Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches stats from a F5 Big-IP node using F5's [iControl REST API](https://clouddocs.f5.com/api/icontrol-rest).\n\n## Prerequisites\n\nThis receiver supports Big-IP versions `11.6.5+`\n\n## Configuration\n\nThe following settings are required:\n\n- `username`\n- `password`\n\nThe following settings are optional:\n\n- `endpoint` (default: `https://localhost:443`): The URL of the Big-IP environment.\n- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. 
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. By default insecure settings are rejected and certificate verification is on.\n\n### Example Configuration\n\n```yaml\nreceivers:\n bigip:\n collection_interval: 10s\n endpoint: https://localhost:443\n username: otelu\n password: ${env:BIGIP_PASSWORD}\n tls:\n insecure_skip_verify: true\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [documentation.md](./documentation.md)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"username":{"title":"username","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for bigip metrics.","properties":{"bigip.node.availability":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.availability"},"bigip.node.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.connection.count"},"bigip.node.data.transmitted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.data.transmitted"},"bigip.node.enabled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.enabled"},"bigip.node.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.packet.count"},"bigip.node.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.request.count"},"bigip.node.session.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.node.session.count"},"bigip.pool.availability":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool.availability"},"bigip.pool.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool.connection.count"},"bigip.pool.data.transmitted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool.data.transmitted"},"bigip.pool.enabled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool.enabled"},"bigip.pool.member.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.me
tadata.MetricConfig","title":"bigip.pool.member.count"},"bigip.pool.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool.packet.count"},"bigip.pool.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool.request.count"},"bigip.pool_member.availability":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.availability"},"bigip.pool_member.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.connection.count"},"bigip.pool_member.data.transmitted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.data.transmitted"},"bigip.pool_member.enabled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.enabled"},"bigip.pool_member.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.packet.count"},"bigip.pool_member.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.request.count"},"bigip.pool_member.session.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.pool_member.session.count"},"bigip.virtual_server.availability":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.virtual_server.availability"},"bigip.virtual_server.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.virtual_server.connection.count"},"bigip.virtual_server.data.transmitted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.virtual_server.data.transmitted"},"bigip.virtual_server.enabled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.virtual_server.enabled"},"bigip.virtual_server.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.virtual_server.packet.count"},"bigip.virtual_server.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.MetricConfig","title":"bigip.virtual_server.request.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource 
attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for bigip resource attributes.","properties":{"bigip.node.ip_address":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.node.ip_address"},"bigip.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.node.name"},"bigip.pool.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.pool.name"},"bigip.pool_member.ip_address":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.pool_member.ip_address"},"bigip.pool_member.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.pool_member.name"},"bigip.virtual_server.destination":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.virtual_server.destination"},"bigip.virtual_server.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.internal.metadata.ResourceAttributeConfig","title":"bigip.virtual_server.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.carbonreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the Carbon receiver.","markdownDescription":"# Carbon Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe [Carbon](https://github.com/graphite-project/carbon) receiver supports\nCarbon's [plaintext\nprotocol](https://graphite.readthedocs.io/en/stable/feeding-carbon.html#the-plaintext-protocol).\n\n\u003e :information_source: The `wavefront` receiver is based on Carbon and binds to the\nsame port by default. This means the `carbon` and `wavefront` receivers\ncannot both be enabled with their respective default configurations. To\nsupport running both receivers in parallel, change the `endpoint` port on one\nof the receivers.\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `0.0.0.0:2003`): Address and port that the\n receiver should bind to.\n- `transport` (default = `tcp`): Must be either `tcp` or `udp`.\n\nThe following setting are optional:\n\n- `tcp_idle_timeout` (default = `30s`): The maximum duration that a tcp\n connection will idle wait for new data. 
This value is ignored if the\n transport is not `tcp`.\n\nIn addition, a `parser` section can be defined with the following settings:\n\n- `type` (default `plaintext`): Specifies the type of parser to be used\n and must be either `plaintext` or `regex`.\n- `config`: Specifies any special configuration of the selected parser.\n\nExample:\n\n```yaml\nreceivers:\n carbon/receiver_settings:\n endpoint: localhost:8080\n transport: udp\n carbon/regex:\n parser:\n type: regex\n config:\n rules:\n - regexp: \"(?P\u003ckey_base\u003etest)\\\\.env(?P\u003ckey_env\u003e[^.]*)\\\\.(?P\u003ckey_host\u003e[^.]*)\"\n name_prefix: \"name-prefix\"\n labels:\n dot.key: dot.value\n key: value\n type: cumulative\n - regexp: \"(?P\u003ckey_just\u003etest)\\\\.(?P\u003ckey_match\u003e.*)\"\n name_separator: \"_\"\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"parser":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.carbonreceiver.protocol.Config","description":"Parser specifies a parser and the respective configuration to be used\nby the receiver.","title":"parser"},"tcp_idle_timeout":{"description":"TCPIdleTimeout is the timout for idle TCP connections, it is ignored\nif transport being used is UDP.","title":"tcp_idle_timeout","type":"string"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.carbonreceiver.protocol.Config":{"additionalProperties":false,"description":"Config is the general configuration for the parser to be used.","properties":{"config":{"description":"Config placeholder for the configuration object of the selected parser.","title":"config"},"type":{"description":"Type of the parser to be used with the arriving data.","title":"type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Chrony Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThe [chrony] receiver is a pure go implementation of the command `chronyc tracking` to allow for\nportability across systems and platforms. 
All of the data that would typically be captured by\nthe tracking command is made available in this receiver; see [documentation](./documentation.md) for\nmore details.\n\n## Configuration\n\n### Default\n\nBy default, the `chrony` receiver uses the following configuration:\n\n```yaml\nchrony/defaults:\n address: unix:///var/run/chrony/chronyd.sock # The default socket used by chronyd to allow cmd access\n timeout: 10s # Allowing at least 10s for chronyd to respond before giving up\n\nchrony:\n # This will result in the same configuration as above\n```\n\n### Customised\n\nThe following options can be customised:\n\n- address (required) - the address used to communicate with `chronyd`\n - The allowed formats are the following:\n - udp://hostname:port\n - unix:///path/to/chrony.sock (Please note the triple slash)\n - unixgram:///path/to/chrony/sock\n - The network type `unix` will be converted to `unixgram` but both are permissible\n- timeout (optional) - The total amount of time allowed to read and process the data from chronyd\n - Recommendation: This value should be set above 1s to allow `chronyd` time to respond\n- collection_interval (optional) - how frequently this receiver should poll [chrony]\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- metrics (optional) - Which metrics should be exported; read the [documentation] for complete details\n\n## Example\n\nAn example of the configuration is:\n\n```yaml\nreceivers:\n chrony:\n address: unix:///var/run/chrony/chronyd.sock\n timeout: 10s\n collection_interval: 30s\n metrics:\n ntp.skew:\n enabled: true\n ntp.stratum:\n enabled: true\n```\n\nThe complete list of metrics emitted by this receiver is found in the [documentation].\n\n[documentation]: ./documentation.md\n[chrony]: https://chrony.tuxfamily.org/","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"Endpoint is the published address or unix socket\nthat allows clients to connect to:\nThe allowed format is:\n unix:///path/to/chronyd/unix.sock\n udp://localhost:323\n\nThe default value is unix:///var/run/chrony/chronyd.sock","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricsConfig","title":"metrics"},"timeout":{"description":"Timeout controls the max time allowed to read data from chronyd","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for chrony 
metrics.","properties":{"ntp.frequency.offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.frequency.offset"},"ntp.skew":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.skew"},"ntp.stratum":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.stratum"},"ntp.time.correction":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.time.correction"},"ntp.time.last_offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.time.last_offset"},"ntp.time.rms_offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.time.rms_offset"},"ntp.time.root_delay":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.internal.metadata.MetricConfig","title":"ntp.time.root_delay"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudflarereceiver.Config":{"additionalProperties":false,"description":"Config holds all the parameters to start an HTTP server that can be sent logs from CloudFlare","markdownDescription":"# Cloudflare Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\nThis Cloudflare receiver allows Cloudflare's [LogPush Jobs](https://developers.cloudflare.com/logs/logpush/) to send logs over HTTPS from the Cloudflare logs aggregation system to an OpenTelemetry collector.\n\n## Getting Started\n\nTo successfully operate this receiver, you must follow these steps in order:\n1. Have a Cloudflare site at the Enterprise plan level.\n - At the time the receiver was written, LogPush was available only for Enterprise sites.\n2. Receive a properly CA signed SSL certificate for use on the collector host.\n3. Configure the receiver using the previously acquired SSL certificate, and then start the collector.\n4. Create a LogPush HTTP destination job following the [directions](https://developers.cloudflare.com/logs/get-started/enable-destinations/http/) provided by Cloudflare. When the job is created, it will attempt to validate the connection to the receiver.\n - If you've configured the receiver with a `secret` to validate requests, ensure you add the value to the `destination_conf` parameter of the LogPush job by adding its value as a query parameter under the `header_X-CF-Secret` parameter. For example, `\"destination_conf\": \"https://example.com?header_X-CF-Secret=abcd1234\"`.\n - If you want the receiver to parse one of the fields as the log record's timestamp (`EdgeStartTimestamp` is the default), the timestamp should be formatted RFC3339. 
This is not the default format, and must be explicitly specified in your job config.\n - If using the deprecated `logpull_options` parameter to configure your job, this can be explicitly specified by adding `\u0026timestamps=rfc3339` to the `logpull_options` string when creating your LogPush job.\n - If using the `output_options` parameter to configure your job, this can be explicitly specified by setting the `timestamp_format` field of `output_options` to `\"rfc3339\"`.\n - The receiver expects the uploaded logs to be in `ndjson` format with no template, prefix, suffix, or delimiter changes based on the options in `output_options`. The only [settings](https://developers.cloudflare.com/logs/reference/log-output-options/#output-types) supported by this receiver in `output_options` are `field_names`, `CVE-2021-44228`, and `sample_rate`.\n5. If the LogPush job is created successfully, the receiver is correctly configured and the LogPush job was able to send it a \"test\" message. If the job failed to create, the most likely issue is with the SSL configuration. Check both the LogPush API response and the receiver's logs for more details.\n\n## Configuration\n\n- `tls` (Cloudflare requires TLS, and a self-signed certificate will not be sufficient)\n - `cert_file` \n - You may need to append your CA certificate to the server's certificate, if it is not a CA known to the LogPush API.\n - `key_file`\n- `endpoint` \n - The endpoint on which the receiver will await requests from Cloudflare\n- `secret`\n - If this value is set, the receiver expects to see it in any valid requests under the `X-CF-Secret` header\n- `timestamp_field` (default: `EdgeStartTimestamp`)\n - This receiver was built with the Cloudflare `http_requests` dataset in mind, but should be able to support any Cloudflare dataset. If using another dataset, you will need to set the `timestamp_field` appropriately in order to have the log record be associated with the correct timestamp. The timestamp must be formatted RFC3339, as stated in the Getting Started section.\n- `attributes`\n - This parameter allows the receiver to be configured to set log record attributes based on fields found in the log message. The fields are not removed from the log message when set in this way. 
Only string, boolean, integer or float fields can be mapped using this parameter.\n\n\n### Example:\n\n```yaml\nreceivers:\n cloudflare:\n logs:\n tls:\n key_file: some_key_file\n cert_file: some_cert_file\n endpoint: 0.0.0.0:12345\n secret: 1234567890abcdef1234567890abcdef\n timestamp_field: EdgeStartTimestamp\n attributes:\n ClientIP: http_request.client_ip\n ClientRequestURI: http_request.uri\n```","properties":{"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudflarereceiver.LogsConfig","title":"logs"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudflarereceiver.LogsConfig":{"additionalProperties":false,"properties":{"attributes":{"patternProperties":{".*":{"type":"string"}},"title":"attributes","type":"object"},"endpoint":{"title":"endpoint","type":"string"},"secret":{"title":"secret","type":"string"},"timestamp_field":{"title":"timestamp_field","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the Cloud Foundry receiver.","properties":{"rlp_gateway":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.RLPGatewayConfig","title":"rlp_gateway"},"uaa":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.UAAConfig","title":"uaa"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.LimitedTLSClientSetting":{"additionalProperties":false,"description":"LimitedTLSClientSetting is a subset of TLSClientSetting, see LimitedHTTPClientSettings for more details","properties":{"insecure_skip_verify":{"title":"insecure_skip_verify","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.RLPGatewayConfig":{"additionalProperties":false,"properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value 
provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"shard_id":{"title":"shard_id","type":"string"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.UAAConfig":{"additionalProperties":false,"properties":{"endpoint":{"title":"endpoint","type":"string"},"password":{"title":"password","type":"string"},"tls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.LimitedTLSClientSetting","title":"tls"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.collectdreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for Collectd receiver.","markdownDescription":"# CollectD `write_http` plugin JSON receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver can receive data exported by the CollectD's `write_http`\nplugin. Only JSON format is supported. Authentication is not supported at\nthis time.\n\nThis receiver was donated by SignalFx and ported from SignalFx's Gateway\n(https://github.com/signalfx/gateway/tree/master/protocol/collectd). As a\nresult, this receiver supports some additional features that are technically\nnot compatible with stock CollectD's write_http plugin. That said, in\npractice such incompatibilities should never surface. For example, this\nreceiver supports extracting labels from different fields. 
Given a field\nvalue `field[a=b, k=v]`, this receiver will extract `a` and `k` as label keys\nand `b` and `v` as the respective label values.\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `localhost:8081`): Address and port that the receiver should bind to.\n\nThe following settings are optional:\n\n- `attributes_prefix` (no default): Used to add query parameters in key=value format to all metrics.\n- `timeout` (default = `30s`): The timeout for incoming requests.\n\nExample:\n\n```yaml\nreceivers:\n collectd:\n collectd/one:\n attributes_prefix: \"dap_\"\n endpoint: \"localhost:12345\"\n timeout: \"50s\"\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"attributes_prefix":{"title":"attributes_prefix","type":"string"},"encoding":{"title":"encoding","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"timeout":{"title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# CouchDB Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches stats from a couchdb server using the `/_node/{node-name}/_stats/couchdb` [endpoint](https://docs.couchdb.org/en/latest/api/server/common.html#node-node-name-stats).\n\n## Prerequisites\n\nThis receiver supports Couchdb versions `2.3+` and `3.1+`.\n\n## Configuration\n\nThe following settings are required:\n\n- `username`\n- `password`\n\nThe following settings are optional:\n\n- `endpoint` (default: `http://localhost:5984`): The URL of the couchdb endpoint\n\n- `collection_interval` (default = `60s`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n\n### Example Configuration\n\n```yaml\nreceivers:\n couchdb:\n endpoint: http://localhost:5984\n username: otelu\n password: ${env:COUCHDB_PASSWORD}\n collection_interval: 60s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). 
TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"username":{"title":"username","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for couchdb metrics.","properties":{"couchdb.average_request_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.average_request_time"},"couchdb.database.open":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.database.open"},"couchdb.database.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.database.operations"},"couchdb.file_descriptor.open":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.file_descriptor.open"},"couchdb.httpd.bulk_requests":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.httpd.bulk_requests"},"couchdb.httpd.requests":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.httpd.requests"},"couchdb.httpd.responses":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.httpd.responses"},"couchdb.httpd.views":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.MetricConfig","title":"couchdb.httpd.views"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for couchdb resource attributes.","properties":{"couchdb.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.internal.metadata.ResourceAttributeConfig","title":"couchdb.node.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.datadogreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Datadog APM Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: 
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n## Overview\nAccepts traces in the Datadog APM format.\n### Supported Datadog APIs\n\n- v0.3 (msgpack and json)\n- v0.4 (msgpack and json)\n- v0.5 (msgpack custom format)\n- v0.6\n- v0.7\n## Configuration\n\nExample:\n\n```yaml\nreceivers:\n datadog:\n endpoint: localhost:8126\n read_timeout: 60s\n```\n### read_timeout (Optional)\nThe read timeout of the HTTP Server\n\nDefault: 60s\n\n### HTTP Service Config\n\nAll config params here are valid as well\n\nhttps://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp#server-configuration","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"read_timeout":{"description":"ReadTimeout of the http server","title":"read_timeout","type":"string"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Docker Stats Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Docker Stats receiver queries the local Docker daemon's container stats API for\nall desired running containers on a configured interval. 
These stats are for container\nresource usage of cpu, memory, network, and the\n[blkio controller](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt).\n\n\u003e :information_source: Requires Docker API version 1.22+ and only Linux is supported.\n\n## Configuration\n\nThe following settings are optional:\n\n- `endpoint` (default = `unix:///var/run/docker.sock`): Address to reach the desired Docker daemon.\n- `collection_interval` (default = `10s`): The interval at which to gather container stats.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `container_labels_to_metric_labels` (no default): A map of Docker container label names whose label values to use\nas the specified metric label key.\n- `env_vars_to_metric_labels` (no default): A map of Docker container environment variables whose values to use\nas the specified metric label key.\n- `excluded_images` (no default, all running containers monitored): A list of strings,\n[regexes](https://golang.org/pkg/regexp/), or [globs](https://github.com/gobwas/glob) whose referent container image\nnames will not be among the queried containers. `!`-prefixed negations are possible for all item types to signify that\nonly unmatched container image names should be excluded.\n - Regexes must be placed between `/` characters: `/my?egex/`. Negations are to be outside the forward slashes:\n `!/my?egex/` will exclude all containers whose name doesn't match the compiled regex `my?egex`.\n - Globs are non-regex items (e.g. `/items/`) containing any of the following: `*[]{}?`. Negations are supported:\n `!my*container` will exclude all containers whose image name doesn't match the glob `my*container`.\n- `timeout` (default = `5s`): The request timeout for any docker daemon query.\n- `api_version` (default = `1.22`): The Docker client API version (must be 1.22+). [Docker API versions](https://docs.docker.com/engine/api/).\n- `metrics` (defaults at [./documentation.md](./documentation.md)): Enables/disables individual metrics. 
See [./documentation.md](./documentation.md) for full detail.\n\nExample:\n\n```yaml\nreceivers:\n docker_stats:\n endpoint: http://example.com/\n collection_interval: 2s\n timeout: 20s\n api_version: 1.24\n container_labels_to_metric_labels:\n my.container.label: my-metric-label\n my.other.container.label: my-other-metric-label\n env_vars_to_metric_labels:\n MY_ENVIRONMENT_VARIABLE: my-metric-label\n MY_OTHER_ENVIRONMENT_VARIABLE: my-other-metric-label\n excluded_images:\n - undesired-container\n - /.*undesired.*/\n - another-*-container\n metrics: \n container.cpu.usage.percpu:\n enabled: true\n container.network.io.usage.tx_dropped:\n enabled: false\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Deprecations\n\n### Transition to cpu utilization metric name aligned with OpenTelemetry specification\n\nThe Docker Stats receiver has been emitting the following cpu metric:\n\n- [container.cpu.percent] for the percentage of CPU used by the container.\n\nThis is in conflict with the OpenTelemetry specification,\nwhich defines [container.cpu.utilization] as the name for this metric.\n\nTo align the emitted metric names with the OpenTelemetry specification,\nthe following process will be followed to phase out the old metrics:\n\n- Between `v0.79.0` and `v0.81.0`, the new metric is introduced and the old metric is marked as deprecated.\n Only the old metric is emitted by default.\n- Between `v0.82.0` and `v0.84.0`, the old metric is disabled and the new one enabled by default.\n- In `v0.85.0` and up, the old metric is removed.\n\nTo change the enabled state for the specific metrics, use the standard configuration options that are available for all metrics.\n\nHere's an example configuration to disable the old metrics and enable the new metrics:\n\n```yaml\nreceivers:\n docker_stats:\n metrics:\n container.cpu.percent:\n enabled: false\n container.cpu.utilization:\n enabled: true\n\n```\n\n### Migrating from ScraperV1 to ScraperV2\n\n*Note: These changes are now in effect and ScraperV1 has been removed as of v0.71.*\n\nThere are some breaking changes from ScraperV1 to ScraperV2. The work done for these changes is tracked in [#9794](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9794).\n\n| Breaking Change | Action |\n|-------------------------------------|-------------------------------------------------------------------------|\n| Many metrics are no longer emitted by default. | See [documentation.md](./documentation.md) to see which metrics are enabled by default. Enable/disable as desired. |\n| BlockIO metric names changed. The type of operation is no longer in the metric name suffix, and is now in an attribute. For example `container.blockio.io_merged_recursive.read` becomes `container.blockio.io_merged_recursive` with an `operation:read` attribute. | Be aware of the metric name changes and make any adjustments to what your downstream expects from BlockIO metrics. |\n| Memory metrics measured in Bytes are now [non-monotonic sums](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#opentelemetry-protocol-data-model-consumer-recommendations) instead of gauges. | Most likely there is no action. The aggregation type is different but the values are the same. Be aware of how your downstream handles gauges vs non-monotonic sums. 
|\n| Config option `provide_per_core_cpu_metrics` has been removed. | Enable the `container.cpu.usage.percpu` metric as per [documentation.md](./documentation.md). |","properties":{"api_version":{"description":"Docker client API version. Default is 1.22","title":"api_version","type":"number"},"collection_interval":{"title":"collection_interval","type":"string"},"container_labels_to_metric_labels":{"description":"A mapping of container label names to MetricDescriptor label keys.\nThe corresponding container label value will become the DataPoint label value\nfor the mapped name. E.g. `io.kubernetes.container.name: container_spec_name`\nwould result in a MetricDescriptor label called `container_spec_name` whose\nMetric DataPoints have the value of the `io.kubernetes.container.name` container label.","patternProperties":{".*":{"type":"string"}},"title":"container_labels_to_metric_labels","type":"object"},"endpoint":{"description":"The URL of the docker server. Default is \"unix:///var/run/docker.sock\"","title":"endpoint","type":"string"},"env_vars_to_metric_labels":{"description":"A mapping of container environment variable names to MetricDescriptor label\nkeys. The corresponding env var values become the DataPoint label value.\nE.g. `APP_VERSION: version` would result MetricDescriptors having a label\nkey called `version` whose DataPoint label values are the value of the\n`APP_VERSION` environment variable configured for that particular container, if\npresent.","patternProperties":{".*":{"type":"string"}},"title":"env_vars_to_metric_labels","type":"object"},"excluded_images":{"description":"A list of filters whose matching images are to be excluded. Supports literals, globs, and regex.","items":{"type":"string"},"title":"excluded_images","type":"array"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricsConfig","title":"metrics"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"The maximum amount of time to wait for docker API responses. 
Default is 5s","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for docker_stats metrics.","properties":{"container.blockio.io_merged_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_merged_recursive"},"container.blockio.io_queued_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_queued_recursive"},"container.blockio.io_service_bytes_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_service_bytes_recursive"},"container.blockio.io_service_time_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_service_time_recursive"},"container.blockio.io_serviced_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_serviced_recursive"},"container.blockio.io_time_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_time_recursive"},"container.blockio.io_wait_time_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.io_wait_time_recursive"},"container.blockio.sectors_recursive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.blockio.sectors_recursive"},"container.cpu.percent":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.percent"},"container.cpu.throttling_data.periods":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.throttling_data.periods"},"container.cpu.throttling_data.throttled_periods":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.throttling_data.throttled_periods"},"container.cpu.throttling_data.throttled_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.throttling_data.throttled_time"},"container.cpu.usage.kernelmode":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.usage.kernelmode"},"co
ntainer.cpu.usage.percpu":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.usage.percpu"},"container.cpu.usage.system":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.usage.system"},"container.cpu.usage.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.usage.total"},"container.cpu.usage.usermode":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.usage.usermode"},"container.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.utilization"},"container.memory.active_anon":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.active_anon"},"container.memory.active_file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.active_file"},"container.memory.anon":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.anon"},"container.memory.cache":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.cache"},"container.memory.dirty":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.dirty"},"container.memory.file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.file"},"container.memory.hierarchical_memory_limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.hierarchical_memory_limit"},"container.memory.hierarchical_memsw_limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.hierarchical_memsw_limit"},"container.memory.inactive_anon":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.inactive_anon"},"container.memory.inactive_file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.inactive_file"},"container.memory.mapped_file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.mapped_file"},"container.memory.percent":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.percent"},"container.memory.pgfault":{"$ref":"#/$defs/github.com.op
en-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.pgfault"},"container.memory.pgmajfault":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.pgmajfault"},"container.memory.pgpgin":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.pgpgin"},"container.memory.pgpgout":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.pgpgout"},"container.memory.rss":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.rss"},"container.memory.rss_huge":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.rss_huge"},"container.memory.total_active_anon":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_active_anon"},"container.memory.total_active_file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_active_file"},"container.memory.total_cache":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_cache"},"container.memory.total_dirty":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_dirty"},"container.memory.total_inactive_anon":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_inactive_anon"},"container.memory.total_inactive_file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_inactive_file"},"container.memory.total_mapped_file":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_mapped_file"},"container.memory.total_pgfault":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_pgfault"},"container.memory.total_pgmajfault":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_pgmajfault"},"container.memory.total_pgpgin":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_pgpgin"},"container.memory.total_pgpgout":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_pgpgout"},"container.memory.total_rss":{"$ref":"#/$defs/github.co
m.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_rss"},"container.memory.total_rss_huge":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_rss_huge"},"container.memory.total_unevictable":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_unevictable"},"container.memory.total_writeback":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.total_writeback"},"container.memory.unevictable":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.unevictable"},"container.memory.usage.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.usage.limit"},"container.memory.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.usage.max"},"container.memory.usage.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.usage.total"},"container.memory.writeback":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.writeback"},"container.network.io.usage.rx_bytes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.rx_bytes"},"container.network.io.usage.rx_dropped":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.rx_dropped"},"container.network.io.usage.rx_errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.rx_errors"},"container.network.io.usage.rx_packets":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.rx_packets"},"container.network.io.usage.tx_bytes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.tx_bytes"},"container.network.io.usage.tx_dropped":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.tx_dropped"},"container.network.io.usage.tx_errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.network.io.usage.tx_errors"},"container.network.io.usage.tx_packets":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricCo
nfig","title":"container.network.io.usage.tx_packets"},"container.pids.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.pids.count"},"container.pids.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.pids.limit"},"container.uptime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.MetricConfig","title":"container.uptime"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for docker_stats resource attributes.","properties":{"container.command_line":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.command_line"},"container.hostname":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.hostname"},"container.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.id"},"container.image.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.image.id"},"container.image.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.image.name"},"container.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.name"},"container.runtime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.runtime"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dotnetdiagnosticsreceiver.Config":{"additionalProperties":false,"markdownDescription":"## Dotnet Diagnostics Receiver\n\n| Status | |\n| ------------------------ |----------------|\n| Stability | [deprecated] |\n| Supported pipeline types | metrics |\n| Distributions | [contrib] |\n\nThis receiver provides a capability similar to the\n[dotnet-counters](https://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-counters)\ntool, which takes a .NET process ID and reads metrics from that process,\nproviding them to the CLI. 
Similarly, this receiver reads metrics from a given\n.NET process, translating them and providing them to the Collector.\n\n#### .NET Counters Overview\n\nThe .NET runtime makes available metrics to interested clients over an IPC\nconnection, listening for requests and responding with metrics sent at a\nspecified interval. All .NET processes newer than 3.0 make available both\ndefault metrics (grouped under the name `System.Runtime`) and any custom metrics\ngenerated via the EventCounter\n[API](https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.tracing.eventcounter?view=net-5.0)\n.\n\nOnce a .NET process is running, a client (such as this receiver) may connect to\nit over IPC, which is either a _Unix domain socket_ on Linux/macOS, or a _named\npipe_ on Windows. After connecting, the client sends the process a request,\nusing a custom binary encoding, indicating both the counters it's interested in\nand the collection interval, then waits for data. If the request is successful,\nthe .NET process sends metrics, also using a custom binary encoding, over the\nIPC connection, at the specified interval.\n\n#### Operation\n\nAt startup, this receiver looks for a file in `TMPDIR` (or `/tmp` if not set)\ncorresponding to the given PID and a naming convention. If found, a Unix domain\nsocket connection is opened, using the file as the endpoint, and a request is\nmade to the dotnet process for metrics, with the given (in the config)\ncollection interval and counters.\n\nAfter that, it listens for metrics arriving from the connection, and sends them\nto the next consumer as soon as they arrive. If the connection fails, or an\nunexpected value is read, the receiver shuts down.\n\n#### Configuration\n\nThis receiver accepts three configuration fields: `collection_interval`,\n`pid`, and `counters`.\n\n| Field Name | Description | Example | Default |\n| ---------- | ----------- | ------- | ------- |\n| `collection_interval` | The interval between metric collection (converted to seconds) | `1m` | `1s` |\n| `pid` | The process ID of the .NET process from which to collect metrics | `1001` | |\n| `counters` | A list of counter groups (sometimes referred to as _providers_ or _event sources_) to request from the .NET process | `[\"MyCounters\"]` | `[\"System.Runtime\", \"Microsoft.AspNetCore.Hosting\"]` |\n\nExample yaml config:\n\n```yaml\nreceivers:\n dotnet_diagnostics:\n collection_interval: 10s\n pid: 23860\n counters: [ \"MyCounters\", \"System.Runtime\" ]\nexporters:\n logging:\n loglevel: info\nservice:\n pipelines:\n metrics:\n receivers: [ dotnet_diagnostics ]\n exporters: [ logging ]\n```\n\n#### Usage With Receiver Creator\n\nIt is possible to create a config file for this receiver with a hard-coded\nprocess id, but it is expected that this receiver will often be used with a\nreceiver creator, and a host observer, to discover .NET processes at runtime.\n\nExample receiver creator config:\n\n```yaml\nextensions:\n host_observer:\nreceivers:\n receiver_creator:\n watch_observers: [ host_observer ]\n receivers:\n dotnet_diagnostics:\n rule: type.hostport \u0026\u0026 process_name == 'dotnet'\n config:\n pid: \"`process_id`\"\nexporters:\n logging:\n loglevel: info\nservice:\n extensions: [ host_observer ]\n pipelines:\n metrics:\n receivers: [ receiver_creator ]\n exporters: [ logging ]\n```\n\n#### Supported Versions\n\nThis receiver is compatible with .NET Core 3.0 and later versions, running on Linux or\nmacOS. 
Windows is not yet supported.\n\n#### External Resources\n\nhttps://github.com/dotnet/diagnostics/blob/master/documentation/design-docs/ipc-protocol.md\n\nhttps://github.com/Microsoft/perfview/blob/main/src/TraceEvent/EventPipe/EventPipeFormat.md\n\n[deprecated]: https://github.com/open-telemetry/opentelemetry-collector#deprecated\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"counters":{"description":"A list of counters for the dotnet process to send to the collector. Defaults\nto [\"System.Runtime\", \"Microsoft.AspNetCore.Hosting\"]. Available counters can\nbe displayed by the `dotnet-counters` tool:\nhttps://docs.microsoft.com/en-us/dotnet/core/diagnostics/dotnet-counters","items":{"type":"string"},"title":"counters","type":"array"},"initial_delay":{"title":"initial_delay","type":"string"},"local_debug_dir":{"description":"LocalDebugDir takes an optional directory name where stream data can be written for\noffline analysis and troubleshooting. If LocalDebugDir is empty, no stream data is\nwritten. If it has a value, MaxLocalDebugFiles also needs to be set, and stream\ndata will be written to disk at the specified location using the naming\nconvention `msg.%d.bin` as each message is received, where %d is the current\nmessage number.","title":"local_debug_dir","type":"string"},"max_local_debug_files":{"description":"MaxLocalDebugFiles indicates the maximum number of files kept in LocalDebugDir. When a\nfile is written, the oldest one will be deleted if necessary to keep the\nnumber of files in LocalDebugDir at the specified maximum.","title":"max_local_debug_files","type":"integer"},"pid":{"description":"The process ID of the dotnet process from which to collect diagnostics. This\nprocess ID is used to generate the file glob \"dotnet-diagnostic-%d-*-socket\"\nto locate a file in TMPDIR (or \"/tmp\" if unset). If the file is found, it is\nused as a Unix domain socket (on Linux/Mac) to communicate with the dotnet\nprocess. 
For ease of use, this receiver is intended to be used with an\nobserver and receiver creator for process discovery and receiver creation.","title":"pid","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.Config":{"additionalProperties":false,"description":"Config is the configuration for the elasticsearch receiver","markdownDescription":"# Elasticsearch Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver queries the Elasticsearch [node stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html), [cluster health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) and [index stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) endpoints in order to scrape metrics from a running Elasticsearch cluster.\n\n## Prerequisites\n\nThis receiver supports Elasticsearch versions 7.9+.\n\nIf Elasticsearch security features are enabled, you must have either the `monitor` or `manage` cluster privilege.\nSee the [Elasticsearch docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/authorization.html) for more information on authorization and [Security privileges](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html).\n\n## Configuration\n\nThe following settings are optional:\n\n- `metrics` (default: see `DefaultMetricsSettings` [here](./internal/metadata/generated_metrics.go)): Allows enabling and disabling specific metrics from being collected in this receiver.\n- `nodes` (default: `[\"_all\"]`): Allows specifying node filters that define which nodes are scraped for node-level and cluster-level metrics. See [the Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/7.9/cluster.html#cluster-nodes) for allowed filters. If this option is left explicitly empty, then no node-level metrics will be scraped and cluster-level metrics will scrape only metrics related to the cluster's health.\n- `skip_cluster_metrics` (default: `false`): If true, cluster-level metrics will not be scraped.\n- `indices` (default: `[\"_all\"]`): Allows specifying index filters that define which indices are scraped for index-level metrics. See [the Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html#index-stats-api-path-params) for allowed filters. If this option is left explicitly empty, then no index-level metrics will be scraped.\n- `endpoint` (default = `http://localhost:9200`): The base URL of the Elasticsearch API for the cluster to monitor.\n- `username` (no default): Specifies the username used to authenticate with Elasticsearch using basic auth. Must be specified if password is specified.\n- `password` (no default): Specifies the password used to authenticate with Elasticsearch using basic auth. 
Must be specified if username is specified.\n- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). On larger clusters, the interval may need to be lengthened, as querying Elasticsearch for metrics will take longer on clusters with more nodes.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\n### Example Configuration\n\n```yaml\nreceivers:\n elasticsearch:\n metrics:\n elasticsearch.node.fs.disk.available:\n enabled: false\n nodes: [\"_local\"]\n skip_cluster_metrics: true\n indices: [\".geoip_databases\"]\n endpoint: http://localhost:9200\n username: otel\n password: password\n collection_interval: 10s\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nThe following metrics are available with the listed versions or later:\n\n- `elasticsearch.indexing_pressure.memory.limit` \u003e= [7.10](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/release-notes-7.10.0.html)\n- `elasticsearch.node.shards.data_set.size` \u003e= [7.13](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/release-notes-7.13.0.html)\n- `elasticsearch.cluster.state_update.count` \u003e= [7.16.0](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/release-notes-7.16.0.html)\n- `elasticsearch.cluster.state_update.time` \u003e= [7.16.0](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/release-notes-7.16.0.html)\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml).\n\n## Feature gate configurations\n\nSee the [Collector feature gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md#collector-feature-gates) for an overview of feature gates in the collector.\n\n**BETA**: `receiver.elasticsearch.emitNodeVersionAttr`\n\nThe feature gate `receiver.elasticsearch.emitNodeVersionAttr`, when enabled, will enrich all node metrics with a\nresource attribute representing the node version.\n\nThis feature gate is enabled by default, and eventually the old implementation will be removed. It aims to give users time\nto migrate to the new implementation. 
The target release for this featuregate to be permanently enabled is 0.82.0.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"indices":{"description":"Indices defines the indices to scrape.\nSee https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html#index-stats-api-path-params\nfor which names are viable.\nIf Indices is empty, no indices will be scraped.","items":{"type":"string"},"title":"indices","type":"array"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricsConfig","title":"metrics"},"nodes":{"description":"Nodes defines the nodes to scrape.\nSee https://www.elastic.co/guide/en/elasticsearch/reference/7.9/cluster.html#cluster-nodes for which selectors may be used here.\nIf Nodes is empty, no nodes will be scraped.","items":{"type":"string"},"title":"nodes","type":"array"},"password":{"description":"Password is the password used when making REST calls to elasticsearch. Must be specified if Username is. Not required.","title":"password","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"skip_cluster_metrics":{"description":"SkipClusterMetrics indicates whether cluster level metrics from /_cluster/* endpoints should be scraped or not.","title":"skip_cluster_metrics","type":"boolean"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"username":{"description":"Username is the username used when making REST calls to elasticsearch. Must be specified if Password is. Not required.","title":"username","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for elasticsearch metrics.","properties":{"elasticsearch.breaker.memory.estimated":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.breaker.memory.estimated"},"elasticsearch.breaker.memory.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.breaker.memory.limit"},"elasticsearch.breaker.tripped":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.breaker.tripped"},"elasticsearch.cluster.data_nodes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.data_nodes"},"elasticsearch.cluster.health":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.health"},"elasticsearch.cluster.in_flight_fetch":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.in_flight_fetch"},"elasticsearch.cluster.indices.cache.evictions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.indices.cache.evictions"},"elasticsearch.cluster.nodes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.nodes"},"elasticsearch.cluster.pending_tasks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receive
r.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.pending_tasks"},"elasticsearch.cluster.published_states.differences":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.published_states.differences"},"elasticsearch.cluster.published_states.full":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.published_states.full"},"elasticsearch.cluster.shards":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.shards"},"elasticsearch.cluster.state_queue":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.state_queue"},"elasticsearch.cluster.state_update.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.state_update.count"},"elasticsearch.cluster.state_update.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.cluster.state_update.time"},"elasticsearch.index.cache.evictions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.cache.evictions"},"elasticsearch.index.cache.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.cache.memory.usage"},"elasticsearch.index.cache.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.cache.size"},"elasticsearch.index.documents":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.documents"},"elasticsearch.index.operations.completed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.operations.completed"},"elasticsearch.index.operations.merge.docs_count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.operations.merge.docs_count"},"elasticsearch.index.operations.merge.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.operations.merge.size"},"elasticsearch.index.operations.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.operations.time"},"elasticsearch.index.segments.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.segments.count"},"elasticsearch.index.segments.mem
ory":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.segments.memory"},"elasticsearch.index.segments.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.segments.size"},"elasticsearch.index.shards.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.shards.size"},"elasticsearch.index.translog.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.translog.operations"},"elasticsearch.index.translog.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.index.translog.size"},"elasticsearch.indexing_pressure.memory.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.indexing_pressure.memory.limit"},"elasticsearch.indexing_pressure.memory.total.primary_rejections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.indexing_pressure.memory.total.primary_rejections"},"elasticsearch.indexing_pressure.memory.total.replica_rejections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.indexing_pressure.memory.total.replica_rejections"},"elasticsearch.memory.indexing_pressure":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.memory.indexing_pressure"},"elasticsearch.node.cache.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.cache.count"},"elasticsearch.node.cache.evictions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.cache.evictions"},"elasticsearch.node.cache.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.cache.memory.usage"},"elasticsearch.node.cache.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.cache.size"},"elasticsearch.node.cluster.connections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.cluster.connections"},"elasticsearch.node.cluster.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.cluster.io"},"elasticsearch.node.disk.io.read":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.i
nternal.metadata.MetricConfig","title":"elasticsearch.node.disk.io.read"},"elasticsearch.node.disk.io.write":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.disk.io.write"},"elasticsearch.node.documents":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.documents"},"elasticsearch.node.fs.disk.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.fs.disk.available"},"elasticsearch.node.fs.disk.free":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.fs.disk.free"},"elasticsearch.node.fs.disk.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.fs.disk.total"},"elasticsearch.node.http.connections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.http.connections"},"elasticsearch.node.ingest.documents":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.ingest.documents"},"elasticsearch.node.ingest.documents.current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.ingest.documents.current"},"elasticsearch.node.ingest.operations.failed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.ingest.operations.failed"},"elasticsearch.node.open_files":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.open_files"},"elasticsearch.node.operations.completed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.operations.completed"},"elasticsearch.node.operations.current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.operations.current"},"elasticsearch.node.operations.get.completed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.operations.get.completed"},"elasticsearch.node.operations.get.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.operations.get.time"},"elasticsearch.node.operations.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.operations.time"},"elasticsearch.node.pipeline.ingest.documents.current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-coll
ector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.pipeline.ingest.documents.current"},"elasticsearch.node.pipeline.ingest.documents.preprocessed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.pipeline.ingest.documents.preprocessed"},"elasticsearch.node.pipeline.ingest.operations.failed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.pipeline.ingest.operations.failed"},"elasticsearch.node.script.cache_evictions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.script.cache_evictions"},"elasticsearch.node.script.compilation_limit_triggered":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.script.compilation_limit_triggered"},"elasticsearch.node.script.compilations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.script.compilations"},"elasticsearch.node.segments.memory":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.segments.memory"},"elasticsearch.node.shards.data_set.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.shards.data_set.size"},"elasticsearch.node.shards.reserved.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.shards.reserved.size"},"elasticsearch.node.shards.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.shards.size"},"elasticsearch.node.thread_pool.tasks.finished":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.thread_pool.tasks.finished"},"elasticsearch.node.thread_pool.tasks.queued":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.thread_pool.tasks.queued"},"elasticsearch.node.thread_pool.threads":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.thread_pool.threads"},"elasticsearch.node.translog.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.translog.operations"},"elasticsearch.node.translog.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.translog.size"},"elasticsearch.node.translog.uncommitted.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-
contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.node.translog.uncommitted.size"},"elasticsearch.os.cpu.load_avg.15m":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.os.cpu.load_avg.15m"},"elasticsearch.os.cpu.load_avg.1m":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.os.cpu.load_avg.1m"},"elasticsearch.os.cpu.load_avg.5m":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.os.cpu.load_avg.5m"},"elasticsearch.os.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.os.cpu.usage"},"elasticsearch.os.memory":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.os.memory"},"elasticsearch.process.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.process.cpu.time"},"elasticsearch.process.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.process.cpu.usage"},"elasticsearch.process.memory.virtual":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"elasticsearch.process.memory.virtual"},"jvm.classes.loaded":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.classes.loaded"},"jvm.gc.collections.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.gc.collections.count"},"jvm.gc.collections.elapsed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.gc.collections.elapsed"},"jvm.memory.heap.committed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.heap.committed"},"jvm.memory.heap.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.heap.max"},"jvm.memory.heap.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.heap.used"},"jvm.memory.heap.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.heap.utilization"},"jvm.memory.nonheap.committed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.nonheap.committed"},"jvm.memory.nonheap.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsea
rchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.nonheap.used"},"jvm.memory.pool.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.pool.max"},"jvm.memory.pool.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.memory.pool.used"},"jvm.threads.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.MetricConfig","title":"jvm.threads.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for elasticsearch resource attributes.","properties":{"elasticsearch.cluster.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributeConfig","title":"elasticsearch.cluster.name"},"elasticsearch.index.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributeConfig","title":"elasticsearch.index.name"},"elasticsearch.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributeConfig","title":"elasticsearch.node.name"},"elasticsearch.node.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.internal.metadata.ResourceAttributeConfig","title":"elasticsearch.node.version"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Expvar Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nAn Expvar Receiver scrapes metrics from [expvar](https://pkg.go.dev/expvar), \nwhich exposes data in JSON format from an HTTP endpoint. The metrics are \nextracted from the `expvar` variable [memstats](https://pkg.go.dev/runtime#MemStats), \nwhich exposes various information about the Go runtime.\n\n## Configuration \n\n### Default\n\nBy default, without any configuration, a request will be sent to `http://localhost:8000/debug/vars` \nevery 60 seconds. The default configuration is achieved by the following:\n\n```yaml\nreceivers:\n expvar:\n```\n\n### Customising\n\nThe following can be configured:\n- Configure the HTTP client for scraping the expvar variables. 
The full set of\n configuration options for the client can be found in the core repo's\n [confighttp](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp#client-configuration).\n - defaults: \n - `endpoint = http://localhost:8000/debug/vars` \n - `timeout = 3s`\n- `collection_interval` - Configure how often the metrics are scraped.\n - default: 1m\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `metrics` - Enable or disable metrics by name.\n\n### Example configuration\n\n```yaml\nreceivers:\n expvar:\n endpoint: \"http://localhost:8000/custom/path\"\n timeout: 1s\n collection_interval: 30s\n metrics:\n process.runtime.memstats.total_alloc:\n enabled: true\n process.runtime.memstats.mallocs:\n enabled: false\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for expvar metrics.","properties":{"process.runtime.memstats.buck_hash_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.buck_hash_sys"},"process.runtime.memstats.frees":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.frees"},"process.runtime.memstats.gc_cpu_fraction":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.gc_cpu_fraction"},"process.runtime.memstats.gc_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.gc_sys"},"process.runtime.memstats.heap_alloc":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.heap_alloc"},"process.runtime.memstats.heap_idle":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.heap_idle"},"process.runtime.memstats.heap_inuse":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.heap_inuse"},"process.runtime.memstats.heap_objects":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.heap_objects"},"process.runtime.memstats.heap_released":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.heap_released"},"process.runtime.memstats.heap_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.heap_sys"},"process.runtime.memstats.last_pause":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.last_pause"},"process.runtime.memstats.lookups":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.rece
iver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.lookups"},"process.runtime.memstats.mallocs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.mallocs"},"process.runtime.memstats.mcache_inuse":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.mcache_inuse"},"process.runtime.memstats.mcache_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.mcache_sys"},"process.runtime.memstats.mspan_inuse":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.mspan_inuse"},"process.runtime.memstats.mspan_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.mspan_sys"},"process.runtime.memstats.next_gc":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.next_gc"},"process.runtime.memstats.num_forced_gc":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.num_forced_gc"},"process.runtime.memstats.num_gc":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.num_gc"},"process.runtime.memstats.other_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.other_sys"},"process.runtime.memstats.pause_total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.pause_total"},"process.runtime.memstats.stack_inuse":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.stack_inuse"},"process.runtime.memstats.stack_sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.stack_sys"},"process.runtime.memstats.sys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.sys"},"process.runtime.memstats.total_alloc":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.internal.metadata.MetricConfig","title":"process.runtime.memstats.total_alloc"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filelogreceiver.FileLogConfig":{"additionalProperties":false,"description":"FileLogConfig defines configuration for the filelog 
receiver","properties":{"attributes":{"patternProperties":{".*":{"type":"string"}},"title":"attributes","type":"object"},"delete_after_read":{"title":"delete_after_read","type":"boolean"},"encoding":{"title":"encoding","type":"string"},"exclude":{"items":{"type":"string"},"title":"exclude","type":"array"},"fingerprint_size":{"title":"fingerprint_size","type":"integer"},"force_flush_period":{"title":"force_flush_period","type":"string"},"header":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.HeaderConfig","title":"header"},"id":{"title":"id","type":"string"},"include":{"items":{"type":"string"},"title":"include","type":"array"},"include_file_name":{"title":"include_file_name","type":"boolean"},"include_file_name_resolved":{"title":"include_file_name_resolved","type":"boolean"},"include_file_path":{"title":"include_file_path","type":"boolean"},"include_file_path_resolved":{"title":"include_file_path_resolved","type":"boolean"},"max_batches":{"title":"max_batches","type":"integer"},"max_concurrent_files":{"title":"max_concurrent_files","type":"integer"},"max_log_size":{"title":"max_log_size","type":"integer"},"multiline":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.helper.MultilineConfig","title":"multiline"},"operators":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.Config"},"title":"operators","type":"array"},"ordering_criteria":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.OrderingCriteria","title":"ordering_criteria"},"output":{"items":{"type":"string"},"title":"output","type":"array"},"poll_interval":{"title":"poll_interval","type":"string"},"preserve_leading_whitespaces":{"title":"preserve_leading_whitespaces","type":"boolean"},"preserve_trailing_whitespaces":{"title":"preserve_trailing_whitespaces","type":"boolean"},"resource":{"patternProperties":{".*":{"type":"string"}},"title":"resource","type":"object"},"retry_on_failure":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.consumerretry.Config","title":"retry_on_failure"},"start_at":{"title":"start_at","type":"string"},"storage":{"title":"storage","type":"string"},"type":{"title":"type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filereceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the file receiver.","markdownDescription":"# File Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics, traces, logs |\n| Distributions | [contrib] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated section --\u003e\n\nThe File Receiver reads the output of a\n[File Exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/fileexporter),\nconverting that output to metrics, and sending the metrics down the pipeline.\n\nCurrently, the only file format supported is the File Exporter's JSON format. 
Reading compressed output, rotated files,\nor telemetry other than metrics are not supported at this time.\n\n## Getting Started\n\nThe following setting is required:\n\n- `path` [no default]: the file in the same format as written by a File Exporter.\n\nThe following setting is optional:\n\n- `throttle` [default: 1]: determines how fast telemetry is replayed. A value of `0` means\n that it will be replayed as fast as the system will allow. A value of `1` means that it will\n be replayed at the same rate as the data came in, as indicated by the timestamps on the\n input file's telemetry data. Higher values mean that the replay speed will be slower by a\n multiple of the throttle value. Values can be decimals, e.g. `0.5` means that telemetry will be\n replayed at 2x the rate indicated by the telemetry's timestamps.\n\n## Example\n\n```yaml\nreceivers:\n file:\n path: my-telemetry-file\n throttle: 0.5\n```","properties":{"path":{"description":"Path of the file to read from. Path is relative to current directory.","title":"path","type":"string"},"throttle":{"description":"Throttle determines how fast telemetry is replayed. A value of zero means\nthat it will be replayed as fast as the system will allow. A value of 1 means\nthat it will be replayed at the same rate as the data came in, as indicated\nby the timestamps on the input file's telemetry data. Higher values mean that\nreplay will be slower by a corresponding amount. Use a value between 0 and 1\nto replay telemetry at a higher speed. Default: 1.","title":"throttle","type":"number"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.Config":{"additionalProperties":false,"markdownDescription":"# File Stats Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe File Stats receiver collects metrics from files specified with a glob pattern.\n\n## Configuration\n- `include` (required): The glob path for files to watch\n- `collection_interval` (default = `1m`): The interval at which metrics are emitted by this receiver.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\nSee [documentation.md] for a list of the metrics collected.","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"include":{"title":"include","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricsConfig","title":"metrics"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular 
metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for filestats metrics.","properties":{"file.atime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricConfig","title":"file.atime"},"file.ctime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricConfig","title":"file.ctime"},"file.mtime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricConfig","title":"file.mtime"},"file.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.MetricConfig","title":"file.size"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for filestats resource attributes.","properties":{"file.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.ResourceAttributeConfig","title":"file.name"},"file.path":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.internal.metadata.ResourceAttributeConfig","title":"file.path"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# FlinkMetrics Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver uses Flink's [REST API](https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/ops/metrics/#rest-api-integration) to collect Jobmanager, Taskmanager, Job, Task and Operator metrics.\n\n## Prerequisites\n\nThis receiver supports Apache Flink versions `1.13.6` and `1.14.4`.\n\nBy default, authentication is not required. 
However, [Flink recommends](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/security/security-ssl/#external--rest-connectivity) using a “side car proxy” that binds the REST endpoint to the loopback interface and starts a REST proxy that authenticates and forwards the requests to Flink.\n\n[SSL](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/security/security-ssl/#external--rest-connectivity) can be enabled for external connectivity with the following REST endpoint [options](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/security/security-ssl/#rest-endpoints-external-connectivity), using either a CA-signed or a self-signed certificate.\n\n## Configuration\n\nThe following settings are optional:\n\n- `endpoint` (default: `http://localhost:15672`): The URL of the node to be monitored.\n- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. By default insecure settings are rejected and certificate verification is on.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\n### Example Configuration\n\n```yaml\nreceivers:\n flinkmetrics:\n endpoint: http://localhost:8081\n collection_interval: 10s\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value 
provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for flinkmetrics 
metrics.","properties":{"flink.job.checkpoint.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.job.checkpoint.count"},"flink.job.checkpoint.in_progress":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.job.checkpoint.in_progress"},"flink.job.last_checkpoint.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.job.last_checkpoint.size"},"flink.job.last_checkpoint.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.job.last_checkpoint.time"},"flink.job.restart.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.job.restart.count"},"flink.jvm.class_loader.classes_loaded":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.class_loader.classes_loaded"},"flink.jvm.cpu.load":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.cpu.load"},"flink.jvm.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.cpu.time"},"flink.jvm.gc.collections.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.gc.collections.count"},"flink.jvm.gc.collections.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.gc.collections.time"},"flink.jvm.memory.direct.total_capacity":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.direct.total_capacity"},"flink.jvm.memory.direct.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.direct.used"},"flink.jvm.memory.heap.committed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.heap.committed"},"flink.jvm.memory.heap.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.heap.max"},"flink.jvm.memory.heap.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.heap.used"},"flink.jvm.memory.mapped.total_capacity":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.mapped.total_capacity"},"flink.jvm.memory.mapped.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flin
k.jvm.memory.mapped.used"},"flink.jvm.memory.metaspace.committed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.metaspace.committed"},"flink.jvm.memory.metaspace.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.metaspace.max"},"flink.jvm.memory.metaspace.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.metaspace.used"},"flink.jvm.memory.nonheap.committed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.nonheap.committed"},"flink.jvm.memory.nonheap.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.nonheap.max"},"flink.jvm.memory.nonheap.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.memory.nonheap.used"},"flink.jvm.threads.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.jvm.threads.count"},"flink.memory.managed.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.memory.managed.total"},"flink.memory.managed.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.memory.managed.used"},"flink.operator.record.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.operator.record.count"},"flink.operator.watermark.output":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.operator.watermark.output"},"flink.task.record.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.MetricConfig","title":"flink.task.record.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for flinkmetrics resource 
attributes.","properties":{"flink.job.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig","title":"flink.job.name"},"flink.resource.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig","title":"flink.resource.type"},"flink.subtask.index":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig","title":"flink.subtask.index"},"flink.task.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig","title":"flink.task.name"},"flink.taskmanager.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig","title":"flink.taskmanager.id"},"host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.internal.metadata.ResourceAttributeConfig","title":"host.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.fluentforwardreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the fluentforward receiver.","properties":{"endpoint":{"description":"The address to listen on for incoming Fluent Forward events. Should be\nof the form `\u003cip addr\u003e:\u003cport\u003e` (TCP) or `unix://\u003csocket_path\u003e` (Unix\ndomain socket).","title":"endpoint","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudpubsubreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Google Pubsub Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces, logs, metrics |\n| Distributions | [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\u003e ⚠️ This is a community-provided module. It has been developed and extensively tested at Collibra, but it is not officially supported by GCP.\n \nThis receiver gets OTLP messages from a Google Cloud [Pubsub](https://cloud.google.com/pubsub) subscription.\n\nThe following configuration options are supported:\n\n* `project` (Optional): The Google Cloud Project the client connects to.\n* `subscription` (Required): The subscription name to receive OTLP data from. The subscription name should be a \n fully qualified resource name (eg: `projects/otel-project/subscriptions/otlp`).\n* `encoding` (Optional): The encoding that will be used to receive data from the subscription. This can either be\n `otlp_proto_trace`, `otlp_proto_metric`, `otlp_proto_log`, or `raw_text` (see `encoding`). This will only be used as \n a fallback, when no `content-type` attribute is present.\n* `compression` (Optional): The compression that will be used on received data from the subscription. When set it can \n only be `gzip`. 
This will only be used as a fallback, when no `content-encoding` attribute is present.\n* `endpoint` (Optional): Override the default Pubsub Endpoint, useful when connecting to the PubSub emulator instance\n or switching between [global and regional service endpoints](https://cloud.google.com/pubsub/docs/reference/service_apis_overview#service_endpoints).\n* `insecure` (Optional): allows performing “insecure” SSL connections and transfers, useful when connecting to a local\n emulator instance. Only has effect if Endpoint is not \"\"\n\n```yaml\nreceivers:\n googlecloudpubsub:\n project: otel-project\n subscription: projects/otel-project/subscriptions/otlp-logs\n encoding: raw_json\n```\n\n## Encoding\n\nYou should not need to set the encoding of the subscription as the receiver will try to discover the type of the data\nby looking at the `ce-type` and `content-type` attributes of the message. Only when those attributes are not set \nmust the `encoding` field in the configuration be set. \n\n| ce-type | ce-datacontenttype | encoding | description |\n| --- | --- | --- | --- |\n| org.opentelemetry.otlp.traces.v1 | application/protobuf | | Decode OTLP trace message |\n| org.opentelemetry.otlp.metrics.v1 | application/protobuf | | Decode OTLP metric message |\n| org.opentelemetry.otlp.logs.v1 | application/json | | Decode OTLP log message |\n| - | - | otlp_proto_trace | Decode OTLP trace message |\n| - | - | otlp_proto_metric | Decode OTLP metric message |\n| - | - | otlp_proto_log | Decode OTLP log message |\n| - | - | raw_text | Wrap in an OTLP log message |\n\nWhen the `encoding` configuration is set, the attributes on the message are ignored.\n\nThe receiver can be used for ingesting arbitrary text messages on a Pubsub subscription and wrapping them in OTLP Log\nmessages, making it a convenient way to ingest log lines from Pubsub.\n\n## Pubsub subscription\n\nThe Google Cloud [Pubsub](https://cloud.google.com/pubsub) receiver doesn't automatically create subscriptions; \nit expects the subscription to be created upfront. 
Security-wise, it's best to give the collector its own \nservice account and give the subscription `Pub/Sub Subscriber` permission.\n\nThe subscription should also be of delivery type `Pull`.\n\n### Filtering\n\nWhen the messages on the subscription are accompanied by the correct attributes and you only need a specific\ntype in your pipeline, the messages can be [filtered](https://cloud.google.com/pubsub/docs/filtering) on the \nsubscription saving on egress fees.\n\nAn example of filtering on trace messages only: \n```\nattributes.ce-type = \"org.opentelemetry.otlp.traces.v1\"\nAND\nattributes.content-type = \"application/protobuf\"\n```","properties":{"client_id":{"description":"The client id that will be used by Pubsub to make load balancing decisions","title":"client_id","type":"string"},"compression":{"description":"Lock down the compression of the payload, leave empty for attribute based detection","title":"compression","type":"string"},"encoding":{"description":"Lock down the encoding of the payload, leave empty for attribute based detection","title":"encoding","type":"string"},"endpoint":{"description":"Override of the Pubsub Endpoint, leave empty for the default endpoint","title":"endpoint","type":"string"},"insecure":{"description":"Only has effect if Endpoint is not \"\"","title":"insecure","type":"boolean"},"project":{"description":"Google Cloud Project ID that the Pubsub client will connect to","title":"project","type":"string"},"subscription":{"description":"The fully qualified resource name of the Pubsub subscription","title":"subscription","type":"string"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"user_agent":{"description":"User agent that will be used by the Pubsub client to connect to the service","title":"user_agent","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudspannerreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Google Cloud Spanner Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nGoogle Cloud Spanner enables you to investigate issues with your database\nby exposing, via [Total and Top N built-in tables](https://cloud.google.com/spanner/docs/introspection):\n- Query statistics\n- Read statistics\n- Transaction statistics\n- Lock statistics\n- and others\n\n_Note_: Total and Top N built-in tables are used with 1 minute statistics granularity.\n\nThe ultimate goal of Google Cloud Spanner Receiver is to collect and transform those statistics into metrics\nthat would be convenient for further analysis by users.\n\n## Getting Started\n\nThe following is an example configuration:\n\n```yaml\nreceivers:\n googlecloudspanner:\n collection_interval: 60s\n initial_delay: 1s\n top_metrics_query_max_rows: 100\n backfill_enabled: true\n cardinality_total_limit: 200000\n hide_topn_lockstats_rowrangestartkey: false\n truncate_text: false\n projects:\n - project_id: \"spanner project 1\"\n 
service_account_key: \"path to spanner project 1 service account json key\"\n instances:\n - instance_id: \"id1\"\n databases:\n - \"db11\"\n - \"db12\"\n - instance_id: \"id2\"\n databases:\n - \"db21\"\n - \"db22\"\n - project_id: \"spanner project 2\"\n service_account_key: \"path to spanner project 2 service account json key\"\n instances:\n - instance_id: \"id3\"\n databases:\n - \"db31\"\n - \"db32\"\n - instance_id: \"id4\"\n databases:\n - \"db41\"\n - \"db42\"\n```\n\nBrief description of configuration properties:\n- **googlecloudspanner** - name of the Cloud Spanner Receiver related section in OpenTelemetry collector configuration file\n- **collection_interval** - this receiver runs periodically. Each time it runs, it queries Google Cloud Spanner, creates metrics, and sends them to the next consumer (default: 1 minute). **It is not recommended to change the default value of collection interval, since new values for metrics in the Spanner database appear only once a minute.**\n- **initial_delay** defines how long this receiver waits before starting.\n- **top_metrics_query_max_rows** - max number of rows to fetch from Top N built-in table (100 by default)\n- **backfill_enabled** - turn on/off 1-hour data backfill (by default it is turned off)\n- **cardinality_total_limit** - limit of active series per 24-hour period. If specified, turns on cardinality filtering and handling. If zero or not specified, cardinality is not handled. You can read [this document](cardinality.md) for more information about cardinality handling and filtering.\n- **hide_topn_lockstats_rowrangestartkey** - if true, masks PII (key values) in row_range_start_key label for the \"top minute lock stats\" metric\n- **truncate_text** - if true, the query text is truncated to 1024 characters.\n- **projects** - list of GCP projects\n - **project_id** - identifier of GCP project\n - **service_account_key** - path to the service account JSON key. It is highly recommended to set this property to the correct value. 
In case it is empty, the [Application Default Credentials](https://google.aip.dev/auth/4110) will be used for the database connection.\n - **instances** - list of Google Cloud Spanner instances for connection\n - **instance_id** - identifier of Google Cloud Spanner instance\n - **databases** - list of databases used from this instance","properties":{"backfill_enabled":{"title":"backfill_enabled","type":"boolean"},"cardinality_total_limit":{"title":"cardinality_total_limit","type":"integer"},"collection_interval":{"title":"collection_interval","type":"string"},"hide_topn_lockstats_rowrangestartkey":{"title":"hide_topn_lockstats_rowrangestartkey","type":"boolean"},"initial_delay":{"title":"initial_delay","type":"string"},"projects":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudspannerreceiver.Project"},"title":"projects","type":"array"},"top_metrics_query_max_rows":{"title":"top_metrics_query_max_rows","type":"integer"},"truncate_text":{"title":"truncate_text","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudspannerreceiver.Instance":{"additionalProperties":false,"properties":{"databases":{"items":{"type":"string"},"title":"databases","type":"array"},"instance_id":{"title":"instance_id","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudspannerreceiver.Project":{"additionalProperties":false,"properties":{"instances":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudspannerreceiver.Instance"},"title":"instances","type":"array"},"project_id":{"title":"project_id","type":"string"},"service_account_key":{"title":"service_account_key","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.Config":{"additionalProperties":false,"markdownDescription":"# HAProxy Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe HAProxy receiver generates metrics by periodically polling the HAProxy process through a dedicated socket or HTTP URL.\n\n## Getting Started\n\n## Configuration\n\n### endpoint (required)\nPath to the endpoint exposed by HAProxy for communications. 
It can be a local file socket or an HTTP URL.\n\n### Collection interval settings (optional)\nThe scraping collection interval can be configured.\n\nDefault: 1 minute\n\n### Initial delay settings (optional)\nDefines how long this receiver waits before starting.\n\nDefault: `1s` \n\n### Example configuration\n\n```yaml\nhaproxy:\n endpoint: file:///var/run/haproxy.ipc\n collection_interval: 1m\n metrics:\n \n```\n\n## Enabling metrics\n\nSee [documentation.md](./documentation.md).\n\nYou can enable or disable individual metrics.\n\nExample:\n\n```yaml\nreceivers:\n haproxy:\n endpoint: http://127.0.0.1:8080/stats\n metrics:\n haproxy.connection_rate:\n enabled: false\n haproxy.requests:\n enabled: true\n```","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for haproxy metrics.","properties":{"haproxy.bytes.input":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.bytes.input"},"haproxy.bytes.output":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.bytes.output"},"haproxy.clients.canceled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.clients.canceled"},"haproxy.compression.bypass":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.compression.bypass"},"haproxy.compression.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.compression.count"},"haproxy.compression.input":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.compression.input"},"haproxy.compression.output":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.compression.output"},"haproxy.connections.errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.connections.errors"},"haproxy.connections.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.connections.rate"},"haproxy.connections.retries":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.connections.retries"},"haproxy.connections.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.connections.total"},"haproxy.downtime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.Metr
icConfig","title":"haproxy.downtime"},"haproxy.failed_checks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.failed_checks"},"haproxy.requests.denied":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.requests.denied"},"haproxy.requests.errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.requests.errors"},"haproxy.requests.queued":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.requests.queued"},"haproxy.requests.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.requests.rate"},"haproxy.requests.redispatched":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.requests.redispatched"},"haproxy.requests.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.requests.total"},"haproxy.responses.denied":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.responses.denied"},"haproxy.responses.errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.responses.errors"},"haproxy.server_selected.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.server_selected.total"},"haproxy.sessions.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.sessions.average"},"haproxy.sessions.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.sessions.count"},"haproxy.sessions.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.sessions.rate"},"haproxy.sessions.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.MetricConfig","title":"haproxy.sessions.total"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for haproxy resource 
attributes.","properties":{"haproxy.addr":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.addr"},"haproxy.algo":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.algo"},"haproxy.iid":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.iid"},"haproxy.pid":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.pid"},"haproxy.sid":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.sid"},"haproxy.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.type"},"haproxy.url":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"haproxy.url"},"proxy_name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"proxy_name"},"service_name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.internal.metadata.ResourceAttributeConfig","title":"service_name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.hostmetricsreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for HostMetrics receiver.","markdownDescription":"# Host Metrics Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [core], [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Host Metrics receiver generates metrics about the host system scraped\nfrom various sources. 
This is intended to be used when the collector is\ndeployed as an agent.\n\n## Getting Started\n\nThe collection interval, root path, and the categories of metrics to be scraped can be\nconfigured:\n\n```yaml\nhostmetrics:\n collection_interval: \u003cduration\u003e # default = 1m\n initial_delay: \u003cduration\u003e # default = 1s\n root_path: \u003cstring\u003e\n scrapers:\n \u003cscraper1\u003e:\n \u003cscraper2\u003e:\n ...\n```\n\nThe available scrapers are:\n\n| Scraper | Supported OSs | Description |\n| ------------ | ---------------------------- | ------------------------------------------------------ |\n| [cpu] | All except Mac\u003csup\u003e[1]\u003c/sup\u003e | CPU utilization metrics |\n| [disk] | All except Mac\u003csup\u003e[1]\u003c/sup\u003e | Disk I/O metrics |\n| [load] | All | CPU load metrics |\n| [filesystem] | All | File System utilization metrics |\n| [memory] | All | Memory utilization metrics |\n| [network] | All | Network interface I/O metrics \u0026 TCP connection metrics |\n| [paging] | All | Paging/Swap space utilization and I/O metrics |\n| [processes] | Linux, Mac | Process count metrics |\n| [process] | Linux, Windows, Mac | Per process CPU, Memory, and Disk I/O metrics |\n\n[cpu]: ./internal/scraper/cpuscraper/documentation.md\n[disk]: ./internal/scraper/diskscraper/documentation.md\n[filesystem]: ./internal/scraper/filesystemscraper/documentation.md\n[load]: ./internal/scraper/loadscraper/documentation.md\n[memory]: ./internal/scraper/memoryscraper/documentation.md\n[network]: ./internal/scraper/networkscraper/documentation.md\n[paging]: ./internal/scraper/pagingscraper/documentation.md\n[processes]: ./internal/scraper/processesscraper/documentation.md\n[process]: ./internal/scraper/processscraper/documentation.md\n\n### Notes\n\n\u003csup\u003e[1]\u003c/sup\u003e Not supported on Mac when compiled without cgo which is the default.\n\nSeveral scrapers support additional configuration:\n\n### Disk\n\n```yaml\ndisk:\n \u003cinclude|exclude\u003e:\n devices: [ \u003cdevice name\u003e, ... ]\n match_type: \u003cstrict|regexp\u003e\n```\n\n### File System\n\n```yaml\nfilesystem:\n \u003cinclude_devices|exclude_devices\u003e:\n devices: [ \u003cdevice name\u003e, ... ]\n match_type: \u003cstrict|regexp\u003e\n \u003cinclude_fs_types|exclude_fs_types\u003e:\n fs_types: [ \u003cfilesystem type\u003e, ... ]\n match_type: \u003cstrict|regexp\u003e\n \u003cinclude_mount_points|exclude_mount_points\u003e:\n mount_points: [ \u003cmount point\u003e, ... ]\n match_type: \u003cstrict|regexp\u003e\n```\n\n### Load\n\n`cpu_average` specifies whether to divide the average load by the reported number of logical CPUs (default: `false`).\n\n```yaml\nload:\n cpu_average: \u003cfalse|true\u003e\n```\n\n### Network\n\n```yaml\nnetwork:\n \u003cinclude|exclude\u003e:\n interfaces: [ \u003cinterface name\u003e, ... ]\n match_type: \u003cstrict|regexp\u003e\n```\n\n### Process\n\n```yaml\nprocess:\n \u003cinclude|exclude\u003e:\n names: [ \u003cprocess name\u003e, ... 
]\n match_type: \u003cstrict|regexp\u003e\n mute_process_name_error: \u003ctrue|false\u003e\n mute_process_exe_error: \u003ctrue|false\u003e\n mute_process_io_error: \u003ctrue|false\u003e\n scrape_process_delay: \u003ctime\u003e\n```\n\n## Advanced Configuration\n\n### Filtering\n\nIf you are only interested in a subset of metrics from a particular source,\nit is recommended you use this receiver with the\n[Filter Processor](../../processor/filterprocessor).\n\n### Different Frequencies\n\nIf you would like to scrape some metrics at a different frequency than others,\nyou can configure multiple `hostmetrics` receivers with different\n`collection_interval` values. For example:\n\n```yaml\nreceivers:\n hostmetrics:\n collection_interval: 30s\n scrapers:\n cpu:\n memory:\n\n hostmetrics/disk:\n collection_interval: 1m\n scrapers:\n disk:\n filesystem:\n\nservice:\n pipelines:\n metrics:\n receivers: [hostmetrics, hostmetrics/disk]\n```\n\n### Collecting host metrics from inside a container (Linux only)\n\nHost metrics are collected from the Linux system directories on the filesystem.\nYou likely want to collect metrics about the host system and not the container.\nThis is achievable by following these steps: \n\n#### 1. Bind mount the host filesystem\n\nThe simplest configuration is to mount the entire host filesystem when running \nthe container. e.g. `docker run -v /:/hostfs ...`.\n\nYou can also choose which parts of the host filesystem to mount, if you know \nexactly what you'll need. e.g. `docker run -v /proc:/hostfs/proc`.\n\n#### 2. Configure `root_path`\n\nConfigure `root_path` so the hostmetrics receiver knows where the root filesystem is.\nNote: if running multiple instances of the host metrics receiver, they must all have\nthe same `root_path`.\n\nExample:\n```yaml\nreceivers:\n hostmetrics:\n root_path: /hostfs\n```\n\n## Resource attributes\n\nCurrently, the hostmetrics receiver does not set any Resource attributes on the exported metrics. However, if you want to set Resource attributes, you can provide them via environment variables via the [resourcedetection](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#environment-variable) processor. 
For example, you can add the following resource attributes to adhere to [Resource Semantic Conventions](https://opentelemetry.io/docs/reference/specification/resource/semantic_conventions/):\n\n```\nexport OTEL_RESOURCE_ATTRIBUTES=\"service.name=\u003cthe name of your service\u003e,service.namespace=\u003cthe namespace of your service\u003e,service.instance.id=\u003cuuid of the instance\u003e\"\n```","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"root_path":{"description":"RootPath is the host's root directory (linux only).","title":"root_path","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# HTTP Check Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe HTTP Check Receiver can be used for synthetic checks against HTTP endpoints. This receiver will make a request to the specified `endpoint` using the\nconfigured `method`. This scraper generates a metric with a label for each HTTP response status class with a value of `1` if the status code matches the\nclass. For example, the following metrics will be generated if the endpoint returned a `200`:\n\n```\nhttpcheck.status{http.status_class:1xx, http.status_code:200,...} = 0\nhttpcheck.status{http.status_class:2xx, http.status_code:200,...} = 1\nhttpcheck.status{http.status_class:3xx, http.status_code:200,...} = 0\nhttpcheck.status{http.status_class:4xx, http.status_code:200,...} = 0\nhttpcheck.status{http.status_class:5xx, http.status_code:200,...} = 0\n```\n\n## Configuration\n\nThe following configuration settings are required:\n\n- `endpoint`: The URL of the endpoint to be monitored.\n\nThe following configuration settings are optional:\n\n- `method` (default: `GET`): The method used to call the endpoint.\n- `collection_interval` (default = `60s`): This receiver collects metrics on an interval. 
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\n### Example Configuration\n\n```yaml\nreceivers:\n httpcheck:\n endpoint: http://endpoint:80\n method: GET\n collection_interval: 10s\n```\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [documentation.md](./documentation.md)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"method":{"title":"method","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for httpcheck metrics.","properties":{"httpcheck.duration":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.internal.metadata.MetricConfig","title":"httpcheck.duration"},"httpcheck.error":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.internal.metadata.MetricConfig","title":"httpcheck.error"},"httpcheck.status":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.internal.metadata.MetricConfig","title":"httpcheck.status"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the IIS receiver.","markdownDescription":"# Microsoft IIS Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `iis` receiver grabs metrics about an IIS instance using the Windows Performance Counters.\nBecause of this, it is a Windows-only receiver.\n\n## Configuration\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): The interval at which metrics should be emitted by this receiver.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\nExample:\n\n```yaml\n receivers:\n iis:\n collection_interval: 10s\n initial_delay: 1s\n\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [documentation.md](./documentation.md)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricsConfig","title":"metrics"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular 
metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for iis metrics.","properties":{"iis.connection.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.connection.active"},"iis.connection.anonymous":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.connection.anonymous"},"iis.connection.attempt.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.connection.attempt.count"},"iis.network.blocked":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.network.blocked"},"iis.network.file.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.network.file.count"},"iis.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.network.io"},"iis.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.request.count"},"iis.request.queue.age.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.request.queue.age.max"},"iis.request.queue.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.request.queue.count"},"iis.request.rejected":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.request.rejected"},"iis.thread.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.thread.active"},"iis.uptime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.MetricConfig","title":"iis.uptime"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for iis resource 
attributes.","properties":{"iis.application_pool":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.ResourceAttributeConfig","title":"iis.application_pool"},"iis.site":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.internal.metadata.ResourceAttributeConfig","title":"iis.site"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.influxdbreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the InfluxDB receiver.","markdownDescription":"# InfluxDB Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver accepts metrics data as [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/).\n\nWrite endpoints exist at `/write` (InfluxDB 1.x compatibility) and `/api/v2/write` (InfluxDB 2.x compatibility).\nWrite query parameters `db`/`rp` (InfluxDB 1.x) and `org`/`bucket` (InfluxDB 2.x) are ignored.\nWrite query parameter `precision` is optional, defaults to `ns`.\n\nWrite responses:\n- 204: success, no further response needed (no content)\n- 400: permanent failure; check response body for details\n- 500: retryable error; check response body for details\n\n## Configuration\n\nThe following configuration options are supported:\n\n* `endpoint` (default = 0.0.0.0:8086) HTTP service endpoint for the line protocol receiver\n\nThe full list of settings exposed for this receiver are documented in [config.go](config.go).\n\nExample:\n```yaml\nreceivers:\n influxdb:\n endpoint: 0.0.0.0:8080\n```\n\n## Definitions\n\n[InfluxDB](https://www.influxdata.com/products/influxdb/) is an open-source time series database.\n\n[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is an open-source metrics agent, similar to the OpenTelemetry Collector.\nTelegraf has [hundreds of plugins](https://www.influxdata.com/products/integrations/?_integrations_dropdown=telegraf-plugins).\n\n[Line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) is a textual HTTP payload format used to move metrics between Telegraf agents and InfluxDB instances.\n\n## Schema\n\nThe InfluxDB-\u003eOpenTelemetry conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/influx2otel) are hosted at https://github.com/influxdata/influxdb-observability .\nThis receiver automatically detects schema at parse time.\n\n### Example: Metrics - `prometheus-v1`\n```\ncpu_temp,foo=bar gauge=87.332\nhttp_requests_total,method=post,code=200 counter=1027\nhttp_requests_total,method=post,code=400 counter=3\nhttp_request_duration_seconds 0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,sum=53423,count=144320\nrpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560473e+07,count=2693\n```\n\n### Example: Metrics - 
`prometheus-v2`\n```\nprometheus,foo=bar cpu_temp=87.332\nprometheus,method=post,code=200 http_requests_total=1027\nprometheus,method=post,code=400 http_requests_total=3\nprometheus,le=0.05 http_request_duration_seconds_bucket=24054\nprometheus,le=0.1 http_request_duration_seconds_bucket=33444\nprometheus,le=0.2 http_request_duration_seconds_bucket=100392\nprometheus,le=0.5 http_request_duration_seconds_bucket=129389\nprometheus,le=1 http_request_duration_seconds_bucket=133988\nprometheus http_request_duration_seconds_count=144320,http_request_duration_seconds_sum=53423\nprometheus,quantile=0.01 rpc_duration_seconds=3102\nprometheus,quantile=0.05 rpc_duration_seconds=3272\nprometheus,quantile=0.5 rpc_duration_seconds=4773\nprometheus,quantile=0.9 rpc_duration_seconds=9001\nprometheus,quantile=0.99 rpc_duration_seconds=76656\nprometheus rpc_duration_seconds_count=1.7560473e+07,rpc_duration_seconds_sum=2693\n```","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for Jaeger receiver.","properties":{"protocols":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.Protocols","title":"protocols"},"remote_sampling":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.RemoteSamplingConfig","title":"remote_sampling"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.ProtocolUDP":{"additionalProperties":false,"description":"ProtocolUDP is the configuration for a UDP protocol.","properties":{"endpoint":{"title":"endpoint","type":"string"},"max_packet_size":{"title":"max_packet_size","type":"integer"},"queue_size":{"title":"queue_size","type":"integer"},"socket_buffer_size":{"title":"socket_buffer_size","type":"integer"},"workers":{"title":"workers","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.Protocols":{"additionalProperties":false,"description":"Protocols is the configuration for the supported 
protocols.","properties":{"grpc":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCServerSettings","title":"grpc"},"thrift_binary":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.ProtocolUDP","title":"thrift_binary"},"thrift_compact":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.ProtocolUDP","title":"thrift_compact"},"thrift_http":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings","title":"thrift_http"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jaegerreceiver.RemoteSamplingConfig":{"additionalProperties":false,"description":"RemoteSamplingConfig defines config key for remote sampling fetch endpoint","markdownDescription":"# Jaeger Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [core], [contrib], [aws], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nReceives trace data in [Jaeger](https://www.jaegertracing.io/) format.\n\n## Getting Started\n\nBy default, the Jaeger receiver will not serve any protocol. A protocol must be\nnamed under the `protocols` object for the jaeger receiver to start. The\nbelow protocols are supported, each supports an optional `endpoint`\nobject configuration parameter.\n\n- `grpc` (default `endpoint` = 0.0.0.0:14250)\n- `thrift_binary` (default `endpoint` = 0.0.0.0:6832)\n- `thrift_compact` (default `endpoint` = 0.0.0.0:6831)\n- `thrift_http` (default `endpoint` = 0.0.0.0:14268)\n\nExamples:\n\n```yaml\nreceivers:\n jaeger:\n protocols:\n grpc:\n jaeger/withendpoint:\n protocols:\n grpc:\n endpoint: 0.0.0.0:14260\n```\n\n## Advanced Configuration\n\nUDP protocols (currently `thrift_binary` and `thrift_compact`) allow setting additional\nserver options:\n\n- `queue_size` (default 1000) sets max not yet handled requests to server\n- `max_packet_size` (default 65_000) sets max UDP packet size\n- `workers` (default 10) sets number of workers consuming the server queue\n- `socket_buffer_size` (default 0 - no buffer) sets buffer size of connection socket in bytes\n\nExamples:\n\n```yaml\nprotocols:\n thrift_binary:\n endpoint: 0.0.0.0:6832\n queue_size: 5_000\n max_packet_size: 131_072\n workers: 50\n socket_buffer_size: 8_388_608\n```\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) including CORS\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n\n## Remote Sampling\n\nSince version [v0.61.0](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.61.0), remote sampling is no longer supported by the jaeger receiver. 
Since version [v0.59.0](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.59.0), the [jaegerremotesampling](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.61.0/extension/jaegerremotesampling/README.md) extension is available and can be used instead.","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"host_endpoint":{"title":"host_endpoint","type":"string"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"strategy_file":{"title":"strategy_file","type":"string"},"strategy_file_reload_interval":{"title":"strategy_file_reload_interval","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC client. 
See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jmxreceiver.Config":{"additionalProperties":false,"markdownDescription":"# JMX Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n### Overview\n\nThe JMX Receiver will work in conjunction with the [OpenTelemetry JMX Metric Gatherer](https://github.com/open-telemetry/opentelemetry-java-contrib/blob/main/jmx-metrics/README.md)\nto report metrics from a target MBean server using a built-in `otel` helper-utilizing Groovy script.\n\n### Details\n\nThis receiver will launch a child JRE process running the JMX Metric Gatherer configured with your specified JMX\nconnection information and target Groovy script. It then reports metrics to an implicitly created OTLP receiver.\nIn order to use you will need to download the most [recent release](https://repo1.maven.org/maven2/io/opentelemetry/contrib/opentelemetry-java-contrib-jmx-metrics/)\nof the JMX Metric Gatherer JAR and configure the receiver with its path. It is assumed that the JRE is\navailable on your system.\n\n# Configuration\n\nNote: this receiver is in alpha and functionality and configuration fields are subject to change.\n\nExample configuration:\n\n```yaml\nreceivers:\n jmx:\n jar_path: /opt/opentelemetry-java-contrib-jmx-metrics.jar\n endpoint: my_jmx_host:12345\n target_system: jvm\n collection_interval: 10s\n initial_delay: 1s\n # optional: the same as specifying OTLP receiver endpoint.\n otlp:\n endpoint: mycollectorotlpreceiver:4317\n username: my_jmx_username\n # determined by the environment variable value\n password: ${env:MY_JMX_PASSWORD}\n resource_attributes: my.attr=my.value,my.other.attr=my.other.value\n log_level: info\n additional_jars:\n - /path/to/other.jar\n```\n\n### jar_path (default: `/opt/opentelemetry-java-contrib-jmx-metrics.jar`)\n\nThe path for the JMX Metric Gatherer uber JAR to run. This must represent a released version 1.9+ of the jar, \nwhich can be downloaded from [github](https://github.com/open-telemetry/opentelemetry-java-contrib/releases). \nIf a non-released version is required, you can specify a custom version by providing the sha256 hash of your \ncustom version of the jar during collector build time using the `ldflags` option. \n\n```bash\ngo build -ldflags \"-X github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver.MetricsGathererHash=\u003csha256hash\u003e\" ...\n```\n\n### endpoint\nThe [JMX Service URL](https://docs.oracle.com/javase/8/docs/api/javax/management/remote/JMXServiceURL.html) or host\nand port used to construct the Service URL the Metric Gatherer's JMX client should use. Value must be in the form of\n`service:jmx:\u003cprotocol\u003e:\u003csap\u003e` or `host:port`. 
Values in `host:port` form will be used to create a Service URL of\n`service:jmx:rmi:///jndi/rmi://\u003chost\u003e:\u003cport\u003e/jmxrmi`.\n\nWhen in or coerced to `service:jmx:\u003cprotocol\u003e:\u003csap\u003e` form, corresponds to the `otel.jmx.service.url` property.\n\n_Required._\n\n### target_system\n\nThe built-in target system (or systems) metric gatherer script to run.\nMust be a subset of: `\"activemq\"`, `\"cassandra\"`, `\"hbase\"`, `\"hadoop\"`, `\"jetty\"`, `\"jvm\"`, `\"kafka\"`, `\"kafka-consumer\"`, `\"kafka-producer\"`, `\"solr\"`, `\"tomcat\"`, `\"wildfly\"`.\n\nIf additional target systems must be supported (because of a custom JMX metrics gatherer jar configured using the \n`MetricsGathererHash` build time config), they can be added with another build time flag.\n\n```bash\ngo build -ldflags \"-X github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver.MetricsGathererHash=\u003csha256hash\u003e\n -X github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver.AdditionalTargetSystems=newtarget,othernewtarget\" ...\n```\n\nCorresponds to the `otel.jmx.target.system` property.\n\n### collection_interval (default: `10s`)\n\nThe interval time for the Groovy script to be run and metrics to be exported by the JMX Metric Gatherer within the persistent JRE process.\n\nCorresponds to the `otel.jmx.interval.milliseconds` property.\n\n### initial_delay (default: `1s`)\n\nDefines how long this receiver waits before starting.\n\n### username\n\nThe username to use for JMX authentication.\n\nCorresponds to the `otel.jmx.username` property.\n\n### password\n\nThe password to use for JMX authentication.\n\nCorresponds to the `otel.jmx.password` property.\n\n### otlp.endpoint (default: `0.0.0.0:\u003crandom open port\u003e`)\n\nThe otlp exporter endpoint to which to listen and submit metrics.\n\nCorresponds to the `otel.exporter.otlp.endpoint` property.\n\n### otlp.timeout (default: `5s`)\n\nThe otlp exporter request timeout.\n\nCorresponds to the `otel.exporter.otlp.metric.timeout` property.\n\n### otlp.headers\n\nThe headers to include in otlp metric submission requests.\n\nCorresponds to the `otel.exporter.otlp.metadata` property.\n\n### keystore_path\n\nThe keystore path is required if SSL is enabled on the target JVM.\n\nCorresponds to the `javax.net.ssl.keyStore` property.\n\n### keystore_password\n\nThe keystore file password if required by SSL.\n\nCorresponds to the `javax.net.ssl.keyStorePassword` property.\n\n### keystore_type\n\nThe keystore type if required by SSL.\n\nCorresponds to the `javax.net.ssl.keyStoreType` property.\n\n### truststore_path \n\nThe truststore path if the SSL profile is required.\n\nCorresponds to the `javax.net.ssl.trustStore` property.\n\n### truststore_password\n\nThe truststore file password if required by SSL.\n\nCorresponds to the `javax.net.ssl.trustStorePassword` property.\n\n### truststore_type\n\nThe truststore type if required by SSL.\n\nCorresponds to the `javax.net.ssl.trustStoreType` property.\n\n### remote_profile\n\nSupported JMX remote profiles are TLS in combination with SASL profiles: SASL/PLAIN, SASL/DIGEST-MD5 and SASL/CRAM-MD5.\nShould be one of: `\"SASL/PLAIN\"`, `\"SASL/DIGEST-MD5\"`, `\"SASL/CRAM-MD5\"`, `\"TLS SASL/PLAIN\"`, `\"TLS SASL/DIGEST-MD5\"`,\nor `\"TLS SASL/CRAM-MD5\"`, though no enforcement is applied.\n\nCorresponds to the `otel.jmx.remote.profile` property.\n\n### realm\n\nThe realm, as required by remote profile SASL/DIGEST-MD5.\n\nCorresponds to the `otel.jmx.realm` 
property.\n\n### additional_jars\n\nAdditional JARs to be included in the java command classpath. This is currently only used to support `wildfly`, where the Additional Jar should be a version of the jboss-client jar found on your wildfly installation.\n\n### resource_attributes\n\nList of resource attributes that will be applied to any metrics emitted from the metrics gatherer.\n\nCorresponds to the `otel.resource.attributes` property.\n\n### log_level\n\nSLF4J log level for the JMX metrics gatherer. Must be one of: `\"trace\"`, `\"debug\"`, `\"info\"`, `\"warn\"`, `\"error\"`, `\"off\"`. If not provided, it will attempt to match the current log level of the collector.\n\nCorresponds to the `org.slf4j.simpleLogger.defaultLogLevel` property.","properties":{"additional_jars":{"description":"Array of additional JARs to be added to the class path when launching the JMX Metric Gatherer JAR","items":{"type":"string"},"title":"additional_jars","type":"array"},"collection_interval":{"description":"The duration in between groovy script invocations and metric exports (10 seconds by default).\nWill be converted to milliseconds.","title":"collection_interval","type":"string"},"endpoint":{"description":"The Service URL or host:port for the target coerced to one of form: service:jmx:rmi:///jndi/rmi://\u003chost\u003e:\u003cport\u003e/jmxrmi.","title":"endpoint","type":"string"},"jar_path":{"description":"The path for the JMX Metric Gatherer uber JAR (/opt/opentelemetry-java-contrib-jmx-metrics.jar by default).","title":"jar_path","type":"string"},"keystore_password":{"description":"The keystore password for SSL","title":"keystore_password","type":"string"},"keystore_path":{"description":"The keystore path for SSL","title":"keystore_path","type":"string"},"keystore_type":{"description":"The keystore type for SSL","title":"keystore_type","type":"string"},"log_level":{"description":"Log level used by the JMX metric gatherer. Should be one of:\n`\"trace\"`, `\"debug\"`, `\"info\"`, `\"warn\"`, `\"error\"`, `\"off\"`","title":"log_level","type":"string"},"otlp":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jmxreceiver.otlpExporterConfig","description":"The settings for the OTLP exporter used by the JMX Metric Gatherer","title":"otlp"},"password":{"description":"The JMX password","title":"password","type":"string"},"realm":{"description":"The SASL/DIGEST-MD5 realm","title":"realm","type":"string"},"remote_profile":{"description":"The JMX remote profile. 
Should be one of:\n`\"SASL/PLAIN\"`, `\"SASL/DIGEST-MD5\"`, `\"SASL/CRAM-MD5\"`, `\"TLS SASL/PLAIN\"`, `\"TLS SASL/DIGEST-MD5\"`, or\n`\"TLS SASL/CRAM-MD5\"`, though no enforcement is applied.","title":"remote_profile","type":"string"},"resource_attributes":{"description":"Map of resource attributes used by the Java SDK Autoconfigure to set resource attributes","patternProperties":{".*":{"type":"string"}},"title":"resource_attributes","type":"object"},"target_system":{"description":"The target system for the metric gatherer whose built in groovy script to run.","title":"target_system","type":"string"},"truststore_password":{"description":"The truststore password for SSL","title":"truststore_password","type":"string"},"truststore_path":{"description":"The truststore path for SSL","title":"truststore_path","type":"string"},"truststore_type":{"description":"The truststore type for SSL","title":"truststore_type","type":"string"},"username":{"description":"The JMX username","title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jmxreceiver.otlpExporterConfig":{"additionalProperties":false,"properties":{"endpoint":{"title":"endpoint","type":"string"},"headers":{"patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.journaldreceiver.JournaldConfig":{"additionalProperties":false,"description":"JournaldConfig defines configuration for the journald receiver","markdownDescription":"## `Journald Receiver`\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nParses Journald events from systemd journal.\nJournald receiver requires that:\n\n- the `journalctl` binary is present in the $PATH of the agent; and\n- the collector's user has sufficient permissions to access the journal through `journalctl`.\n\n## Configuration\n\n| Field | Default | Description |\n|-------------------------------------|--------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `directory` | `/run/log/journal` or `/run/journal` | A directory containing journal files to read entries from |\n| `files` | | A list of journal files to read entries from |\n| `start_at` | `end` | At startup, where to start reading logs from the file. Options are beginning or end |\n| `units` | | A list of units to read entries from. See [Multiple filtering options](#multiple-filtering-options) examples. |\n| `matches` | | A list of matches to read entries from. See [Matches](#matches) and [Multiple filtering options](#multiple-filtering-options) examples. 
|\n| `priority` | `info` | Filter output by message priorities or priority ranges. See [Multiple filtering options](#multiple-filtering-options) examples. |\n| `grep` | | Filter output to entries where the MESSAGE= field matches the specified regular expression. See [Multiple filtering options](#multiple-filtering-options) examples. |\n| `storage` | none | The ID of a storage extension to be used to store cursors. Cursors allow the receiver to pick up where it left off in the case of a collector restart. If no storage extension is used, the receiver will manage cursors in memory only. |\n| `retry_on_failure.enabled` | `false` | If `true`, the receiver will pause reading a file and attempt to resend the current batch of logs if it encounters an error from downstream components. |\n| `retry_on_failure.initial_interval` | `1 second` | Time to wait after the first failure before retrying. |\n| `retry_on_failure.max_interval` | `30 seconds` | Upper bound on retry backoff interval. Once this value is reached the delay between consecutive retries will remain constant at the specified value. |\n| `retry_on_failure.max_elapsed_time` | `5 minutes` | Maximum amount of time (including retries) spent trying to send a logs batch to a downstream consumer. Once this value is reached, the data is discarded. Retrying never stops if set to `0`. |\n| `operators` | [] | An array of [operators](../../pkg/stanza/docs/operators/README.md#what-operators-are-available). See below for more details |\n\n### Operators\n\nEach operator performs a simple responsibility, such as parsing a timestamp or JSON. Chain together operators to process logs into a desired format.\n\n- Every operator has a `type`.\n- Every operator can be given a unique `id`. If you use the same type of operator more than once in a pipeline, you must specify an `id`. Otherwise, the `id` defaults to the value of `type`.\n- Operators will output to the next operator in the pipeline. The last operator in the pipeline will emit from the receiver. Optionally, the `output` parameter can be used to specify the `id` of another operator to which logs will be passed directly.\n- Only parsers and general purpose operators should be used.\n\n### Example Configurations\n\n```yaml\nreceivers:\n journald:\n directory: /run/log/journal\n units:\n - ssh\n - kubelet\n - docker\n - containerd\n priority: info\n```\n\n#### Matches\n\nThe following configuration:\n\n```yaml\n- type: journald_input\n matches:\n - _SYSTEMD_UNIT: ssh\n - _SYSTEMD_UNIT: kubelet\n _UID: \"1000\"\n```\n\nwill be passed to `journalctl` as the following arguments: `journalctl ... _SYSTEMD_UNIT=ssh + _SYSTEMD_UNIT=kubelet _UID=1000`,\nwhich is going to retrieve all entries which match at least one of the following rules:\n\n- `_SYSTEMD_UNIT` is `ssh`\n- `_SYSTEMD_UNIT` is `kubelet` and `_UID` is `1000`\n\n#### Multiple filtering options\n\nIn case of using multiple following options, conditions between them are logically `AND`ed and within them are logically `OR`ed:\n\n```text\n( priority )\nAND\n( units[0] OR units[1] OR units[2] OR ... units[U] )\nAND\n( matches[0] OR matches[1] OR matches[2] OR ... matches[M] )\nAND\n( grep )\n```\n\nConsider the following example:\n\n```yaml\n- type: journald_input\n matches:\n - _SYSTEMD_UNIT: ssh\n - _SYSTEMD_UNIT: kubelet\n _UID: \"1000\"\n units:\n - kubelet\n - systemd\n priority: info\n```\n\nThe above configuration will be passed to `journalctl` as the following arguments\n`journalctl ... 
--priority=info --unit=kubelet --unit=systemd _SYSTEMD_UNIT=ssh + _SYSTEMD_UNIT=kubelet _UID=1000`,\nwhich is going to effectively retrieve all entries which matches the following set of rules:\n\n- `_PRIORITY` is `6`, and\n- `_SYSTEMD_UNIT` is `kubelet` or `systemd`, and\n- entry matches at least one of the following rules:\n\n - `_SYSTEMD_UNIT` is `ssh`\n - `_SYSTEMD_UNIT` is `kubelet` and `_UID` is `1000`\n\n## Setup and deployment\n\nThe user running the collector must have enough permissions to access the journal; not granting them will lead to issues.\n\nWhen running in a containerized environment, differences in the systemd version running on the host and on the container may prevent access to logs due to different features and configurations (e.g. zstd compression, keyed hash etc).\n\n### Docker\n\nWhen running otelcol in a container, note that:\n\n1. the container must run as a user that has permission to access the logs\n2. the path to the log directory (`/run/log/journal`, `/var/log/journal`...) must be mounted in the container\n3. depending on your guest system, you might need to explicitly set the log directory in the configuration\n\nPlease note that *the official otelcol images do not contain the journald binary*; you will need to create your custom image or find one that does.\n\n### Linux packaging\n\nWhen installing otelcol as a linux package, you will most likely need to add the `otelcol-contrib` or `otel` user to the `systemd-journal` group. The exact user and group might vary depending on your package and linux distribution of choice.\n\nYou can test if the user has sufficient permissions by running something like (you might need to adjust according to available shell and opentelemetry user)\n\n```sh\nsudo su -s /bin/bash -c 'journalctl --lines 5' otelcol-contrib\n```\n\nif the permissions are set correctly you will see some logs, otherwise a clear error message.\n\n### Kubernetes\n\nSee the instructions for [Docker](#Docker) and adapt according to your Kubernetes distribution and node OS.","properties":{"operators":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.Config"},"title":"operators","type":"array"},"retry_on_failure":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.consumerretry.Config","title":"retry_on_failure"},"storage":{"title":"storage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8sclusterreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for kubernetes cluster receiver.","markdownDescription":"# Kubernetes Cluster Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Kubernetes Cluster receiver collects cluster-level metrics from the Kubernetes\nAPI server. It uses the K8s API to listen for updates. 
A single instance of this\nreceiver can be used to monitor a cluster.\n\nCurrently this receiver supports authentication via service accounts only. See [example](#example)\nfor more information.\n\n## Configuration\n\nThe following settings are required:\n\n- `auth_type` (default = `serviceAccount`): Determines how to authenticate to\nthe K8s API server. This can be one of `none` (for no auth), `serviceAccount`\n(to use the standard service account token provided to the agent pod), or\n`kubeConfig` to use credentials from `~/.kube/config`.\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): This receiver continuously watches\nfor events using K8s API. However, the metrics collected are emitted only\nonce every collection interval. `collection_interval` will determine the\nfrequency at which metrics are emitted by this receiver.\n- `node_conditions_to_report` (default = `[Ready]`): An array of node\nconditions this receiver should report. See\n[here](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) for\nlist of node conditions. The receiver will emit one metric per entry in the\narray.\n- `distribution` (default = `kubernetes`): The Kubernetes distribution being used\nby the cluster. Currently supported versions are `kubernetes` and `openshift`. Setting\nthe value to `openshift` enables OpenShift specific metrics in addition to standard\nkubernetes ones.\n- `allocatable_types_to_report` (default = `[]`): An array of allocatable resource types this receiver should report.\nThe following allocatable resource types are available.\n - cpu\n - memory\n - ephemeral-storage\n - storage\n\nExample:\n\n```yaml\n k8s_cluster:\n auth_type: kubeConfig\n node_conditions_to_report: [Ready, MemoryPressure]\n allocatable_types_to_report: [cpu, memory]\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n### node_conditions_to_report\n\nFor example, with the config below the receiver will emit two metrics\n`k8s.node.condition_ready` and `k8s.node.condition_memory_pressure`, one\nfor each condition in the config. The value will be `1` if the `ConditionStatus` for the\ncorresponding `Condition` is `True`, `0` if it is `False` and -1 if it is `Unknown`.\n\n```yaml\n...\nk8s_cluster:\n node_conditions_to_report:\n - Ready\n - MemoryPressure\n...\n```\n\n### metadata_exporters\n\nA list of metadata exporters to which metadata being collected by this receiver\nshould be synced. Exporters specified in this list are expected to implement the\nfollowing interface. 
If an exporter that does not implement the interface is listed,\nstartup will fail.\n\n```go\ntype MetadataExporter interface {\n ConsumeMetadata(metadata []*MetadataUpdate) error\n}\n\ntype MetadataUpdate struct {\n ResourceIDKey string\n ResourceID ResourceID\n MetadataDelta\n}\n\ntype MetadataDelta struct {\n MetadataToAdd map[string]string\n MetadataToRemove map[string]string\n MetadataToUpdate map[string]string\n}\n```\n\nSee [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/experimentalmetricmetadata/metadata.go) for details about the above types.\n\n\n## Example\n\nHere is an example deployment of the collector that sets up this receiver along with\nthe logging exporter.\n\nFollow the below sections to set up various Kubernetes resources required for the deployment.\n\n### Configuration\n\nCreate a ConfigMap with the config for `otelcontribcol`:\n\n```bash\ncat \u003c\u003cEOF | kubectl apply -f -\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\ndata:\n config.yaml: |\n receivers:\n k8s_cluster:\n collection_interval: 10s\n exporters:\n logging:\n service:\n pipelines:\n metrics:\n receivers: [k8s_cluster]\n exporters: [logging]\nEOF\n```\n\n### Service Account\n\nCreate a service account that the collector should use.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: otelcontribcol\n name: otelcontribcol\nEOF\n```\n\n### RBAC\n\nUse the below commands to create a `ClusterRole` with required permissions and a \n`ClusterRoleBinding` to grant the role to the service account created above.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n - namespaces\n - namespaces/status\n - nodes\n - nodes/spec\n - pods\n - pods/status\n - replicationcontrollers\n - replicationcontrollers/status\n - resourcequotas\n - services\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - apps\n resources:\n - daemonsets\n - deployments\n - replicasets\n - statefulsets\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - extensions\n resources:\n - daemonsets\n - deployments\n - replicasets\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - batch\n resources:\n - jobs\n - cronjobs\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - autoscaling\n resources:\n - horizontalpodautoscalers\n verbs:\n - get\n - list\n - watch\nEOF\n```\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: otelcontribcol\nsubjects:\n- kind: ServiceAccount\n name: otelcontribcol\n namespace: default\nEOF\n```\n\n### Deployment\n\nCreate a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) to deploy the collector.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: otelcontribcol\n template:\n metadata:\n labels:\n app: otelcontribcol\n spec:\n serviceAccountName: otelcontribcol\n containers:\n - name: otelcontribcol\n image: otel/opentelemetry-collector-contrib\n args: [\"--config\", 
\"/etc/config/config.yaml\"]\n volumeMounts:\n - name: config\n mountPath: /etc/config\n imagePullPolicy: IfNotPresent\n volumes:\n - name: config\n configMap:\n name: otelcontribcol\nEOF\n```\n\n### OpenShift\n\nYou can enable OpenShift support to collect OpenShift specific metrics in addition to the default\nkubernetes ones. To do this, set the `distribution` key to `openshift`.\n\nExample:\n\n```yaml\n k8s_cluster:\n distribution: openshift\n```\n\nAdd the following rules to your ClusterRole:\n\n```yaml\n- apigroups:\n - quota.openshift.io\n resources:\n - clusterresourcequotas\n verbs:\n - get\n - list\n - watch\n```","properties":{"allocatable_types_to_report":{"description":"Allocate resource types to report. See all resource types, see\nhere: https://kubernetes.io/docs/concepts/architecture/nodes/#capacity","items":{"type":"string"},"title":"allocatable_types_to_report","type":"array"},"auth_type":{"description":"How to authenticate to the K8s API server. This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"},"collection_interval":{"description":"Collection interval for metrics.","title":"collection_interval","type":"string"},"distribution":{"description":"Whether OpenShift supprot should be enabled or not.","title":"distribution","type":"string"},"metadata_exporters":{"description":"List of exporters to which metadata from this receiver should be forwarded to.","items":{"type":"string"},"title":"metadata_exporters","type":"array"},"node_conditions_to_report":{"description":"Node condition types to report. See all condition types, see\nhere: https://kubernetes.io/docs/concepts/architecture/nodes/#condition.","items":{"type":"string"},"title":"node_conditions_to_report","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8seventsreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for kubernetes events receiver.","markdownDescription":"# Kubernetes Events Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe kubernetes Events receiver collects events from the Kubernetes\nAPI server. It collects all the new or updated events that come in.\n\nCurrently this receiver supports authentication via service accounts only.\nSee [example](#example) for more information.\n\n## Configuration\n\nThe following settings are optional:\n\n- `auth_type` (default = `serviceAccount`): Determines how to authenticate to\nthe K8s API server. 
This can be one of `none` (for no auth), `serviceAccount`\n(to use the standard service account token provided to the agent pod), or\n`kubeConfig` to use credentials from `~/.kube/config`.\n- `namespaces` (default = `all`): An array of `namespaces` to collect events from.\nThis receiver will continuously watch all the `namespaces` mentioned in the array for\nnew events.\n\nExamples:\n\n```yaml\n k8s_events:\n auth_type: kubeConfig\n namespaces: [default, my_namespace]\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Example\n\nHere is an example deployment of the collector that sets up this receiver along with\nthe [OTLP Exporter](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/otlpexporter/README.md).\n\nFollow the below sections to setup various Kubernetes resources required for the deployment.\n\n### Configuration\n\nCreate a ConfigMap with the config for `otelcontribcol`. Replace `OTLP_ENDPOINT`\nwith valid value.\n\n```bash\ncat \u003c\u003cEOF | kubectl apply -f -\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\ndata:\n config.yaml: |\n receivers:\n k8s_events:\n namespaces: [default, my_namespace]\n exporters:\n otlp:\n endpoint: \u003cOTLP_ENDPOINT\u003e\n tls:\n insecure: true\n\n service:\n pipelines:\n logs:\n receivers: [k8s_events]\n exporters: [otlp]\nEOF\n```\n\n### Service Account\n\nCreate a service account that the collector should use.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: otelcontribcol\n name: otelcontribcol\nEOF\n```\n\n### RBAC\n\nUse the below commands to create a `ClusterRole` with required permissions and a\n`ClusterRoleBinding` to grant the role to the service account created above.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n - namespaces\n - namespaces/status\n - nodes\n - nodes/spec\n - pods\n - pods/status\n - replicationcontrollers\n - replicationcontrollers/status\n - resourcequotas\n - services\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - apps\n resources:\n - daemonsets\n - deployments\n - replicasets\n - statefulsets\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - extensions\n resources:\n - daemonsets\n - deployments\n - replicasets\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - batch\n resources:\n - jobs\n - cronjobs\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - autoscaling\n resources:\n - horizontalpodautoscalers\n verbs:\n - get\n - list\n - watch\nEOF\n```\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: otelcontribcol\nsubjects:\n- kind: ServiceAccount\n name: otelcontribcol\n namespace: default\nEOF\n```\n\n### Deployment\n\nCreate a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) to deploy the collector.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nspec:\n replicas: 1\n selector:\n matchLabels:\n 
app: otelcontribcol\n template:\n metadata:\n labels:\n app: otelcontribcol\n spec:\n serviceAccountName: otelcontribcol\n containers:\n - name: otelcontribcol\n image: otelcontribcol:latest # specify image\n args: [\"--config\", \"/etc/config/config.yaml\"]\n volumeMounts:\n - name: config\n mountPath: /etc/config\n imagePullPolicy: IfNotPresent\n volumes:\n - name: config\n configMap:\n name: otelcontribcol\nEOF\n```","properties":{"auth_type":{"description":"How to authenticate to the K8s API server. This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"},"namespaces":{"description":"List of ‘namespaces’ to collect events from.","items":{"type":"string"},"title":"namespaces","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8sobjectsreceiver.Config":{"additionalProperties":false,"properties":{"auth_type":{"description":"How to authenticate to the K8s API server. This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"},"objects":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8sobjectsreceiver.K8sObjectsConfig"},"title":"objects","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8sobjectsreceiver.K8sObjectsConfig":{"additionalProperties":false,"markdownDescription":"# Kubernetes Objects Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe kubernetes Objects receiver collects(pull/watch) objects from the Kubernetes API server.\n\nCurrently this receiver supports authentication via service accounts only.\nSee [example](#example) for more information.\n\n## Getting Started\n\nThe following is example configuration\n\n```yaml\n k8sobjects:\n auth_type: serviceAccount\n objects:\n - name: pods\n mode: pull\n label_selector: environment in (production),tier in (frontend)\n field_selector: status.phase=Running\n interval: 15m\n - name: events\n mode: watch\n group: events.k8s.io\n namespaces: [default]\n```\n\nBrief description of configuration properties:\n- `auth_type` (default = `serviceAccount`): Determines how to authenticate to\nthe K8s API server. 
This can be one of `none` (for no auth), `serviceAccount`\n(to use the standard service account token provided to the agent pod), or\n`kubeConfig` to use credentials from `~/.kube/config`.\n- `name`: Name of the resource object to collect\n- `mode`: defines how this type of object is collected, either \"pull\" or \"watch\".\n - `pull` mode will read all objects of this type using the list API at an interval.\n - `watch` mode will set up a long-lived connection using the watch API to receive only updates.\n- `label_selector`: select objects by label(s)\n- `field_selector`: select objects by field(s)\n- `interval`: the interval at which objects are pulled, default 60 minutes. Only used in `pull` mode.\n- `resource_version` allows watching resources starting from a specific version (default = `1`). Only available for `watch` mode.\n- `namespaces`: An array of `namespaces` to collect events from. (default = `all`)\n- `group`: API group name. It is an optional config. When the given resource object is present in multiple groups,\nuse this config to specify which group to select. By default, it will select the first group.\nFor example, the `events` resource is available in both the `v1` and `events.k8s.io/v1` API groups. In \nthis case, it will select `v1` by default.\n\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\nFollow the below sections to set up various Kubernetes resources required for the deployment.\n\n### Configuration\n\nCreate a ConfigMap with the config for `otelcontribcol`. Replace `OTLP_ENDPOINT`\nwith a valid value.\n\n```bash\ncat \u003c\u003cEOF | kubectl apply -f -\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\ndata:\n config.yaml: |\n receivers:\n k8sobjects:\n objects:\n - name: pods\n mode: pull\n - name: events\n mode: watch\n exporters:\n otlp:\n endpoint: \u003cOTLP_ENDPOINT\u003e\n tls:\n insecure: true\n\n service:\n pipelines:\n logs:\n receivers: [k8sobjects]\n exporters: [otlp]\nEOF\n```\n\n### Service Account\n\nCreate a service account that the collector should use.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: otelcontribcol\n name: otelcontribcol\nEOF\n```\n\n### RBAC\n\nUse the below commands to create a `ClusterRole` with required permissions and a\n`ClusterRoleBinding` to grant the role to the service account created above.\nThe following config will work for collecting pods and events only. 
You need to add\nappropriate rule for collecting other objects.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n - pods\n verbs:\n - get\n - list\n - watch\n- apiGroups: \n - \"events.k8s.io\"\n resources:\n - events\n verbs:\n - watch\nEOF\n```\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: otelcontribcol\nsubjects:\n- kind: ServiceAccount\n name: otelcontribcol\n namespace: default\nEOF\n```\n\n### Deployment\n\nCreate a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) to deploy the collector.\nNote: This receiver must be deployed as one replica, otherwise it'll be producing duplicated data.\n\n```bash\n\u003c\u003cEOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: otelcontribcol\n labels:\n app: otelcontribcol\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: otelcontribcol\n template:\n metadata:\n labels:\n app: otelcontribcol\n spec:\n serviceAccountName: otelcontribcol\n containers:\n - name: otelcontribcol\n image: otelcontribcol:latest # specify image\n args: [\"--config\", \"/etc/config/config.yaml\"]\n volumeMounts:\n - name: config\n mountPath: /etc/config\n imagePullPolicy: IfNotPresent\n volumes:\n - name: config\n configMap:\n name: otelcontribcol\nEOF\n```\n\n## Troubleshooting\n\nIf receiver returns error similar to below, make sure that resource is added to `ClusterRole`.\n```\n{\"kind\": \"receiver\", \"name\": \"k8sobjects\", \"pipeline\": \"logs\", \"resource\": \"events.k8s.io/v1, Resource=events\", \"error\": \"unknown\"}\n```","properties":{"field_selector":{"title":"field_selector","type":"string"},"group":{"title":"group","type":"string"},"interval":{"title":"interval","type":"string"},"label_selector":{"title":"label_selector","type":"string"},"mode":{"title":"mode","type":"string"},"name":{"title":"name","type":"string"},"namespaces":{"items":{"type":"string"},"title":"namespaces","type":"array"},"resource_version":{"title":"resource_version","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.Config":{"additionalProperties":false,"description":"Config represents user settings for kafkametrics receiver","markdownDescription":"# Kafka Metrics Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nKafka metrics receiver collects kafka metrics (brokers, topics, partitions, consumer groups) from kafka server,\nconverting into otlp.\n\n## Getting Started\n\nRequired settings (no defaults):\n\n- `protocol_version`: Kafka protocol version\n- `scrapers`: any combination of the following scrapers can be enabled.\n - `topics`\n - `consumers`\n - 
\nOptional Settings (with defaults):\n\n- `brokers` (default = localhost:9092): the list of brokers to read from.\n- `topic_match` (default = ^[^_].*$): regex pattern of topics to filter on for metrics collection. The default filter excludes internal topics (starting with `_`).\n- `group_match` (default = .*): regex pattern of consumer groups to filter on for metrics.\n- `client_id` (default = otel-metrics-receiver): consumer client ID\n- `collection_interval` (default = 1m): frequency of metric collection/scraping.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `auth` (default none)\n - `plain_text`\n - `username`: The username to use.\n - `password`: The password to use.\n - `tls`\n - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should only be used\n if `insecure` is set to false.\n - `cert_file`: path to the TLS cert to use for TLS required connections. Should only be used if `insecure` is\n set to false.\n - `key_file`: path to the TLS key to use for TLS required connections. Should only be used if `insecure` is set\n to false.\n - `insecure` (default = false): Disable verifying the server's certificate chain and host\n name (`InsecureSkipVerify` in the tls config)\n - `server_name_override`: ServerName indicates the name of the server requested by the client in order to\n support virtual hosting.\n - `kerberos`\n - `service_name`: Kerberos service name\n - `realm`: Kerberos realm\n - `use_keytab`: Use a keytab instead of a password; if this is true, the keytab file will be used instead of the\n password\n - `username`: The Kerberos username used to authenticate with the KDC\n - `password`: The Kerberos password used to authenticate with the KDC\n - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf\n - `keytab_file`: Path to keytab file. 
i.e /etc/security/kafka.keytab\n\n## Examples:\n\n1) Basic configuration with all scrapers:\n\n```yaml\nreceivers:\n kafkametrics:\n protocol_version: 2.0.0\n scrapers:\n - brokers\n - topics\n - consumers\n```\n\n2) Configuration with more optional settings:\n\nFor this example:\n- collection interval is 5 secs.\n\n```yaml\nreceivers:\n kafkametrics:\n brokers: 10.10.10.10:9092\n protocol_version: 2.0.0\n scrapers:\n - brokers\n - topics\n - consumers\n auth:\n tls:\n ca_file: ca.pem\n cert_file: cert.pem\n key_file: key.pem\n collection_interval: 5s\n```","properties":{"auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Authentication","description":"Authentication data","title":"auth"},"brokers":{"description":"The list of kafka brokers (default localhost:9092)","items":{"type":"string"},"title":"brokers","type":"array"},"client_id":{"description":"ClientID is the id associated with the consumer that reads from topics in kafka.","title":"client_id","type":"string"},"collection_interval":{"title":"collection_interval","type":"string"},"group_match":{"description":"GroupMatch consumer groups to collect on","title":"group_match","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricsConfig","title":"metrics"},"protocol_version":{"description":"ProtocolVersion Kafka protocol version","title":"protocol_version","type":"string"},"scrapers":{"description":"Scrapers defines which metric data points to be captured from kafka","items":{"type":"string"},"title":"scrapers","type":"array"},"topic_match":{"description":"TopicMatch topics to collect metrics on","title":"topic_match","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for kafkametrics 
metrics.","properties":{"kafka.brokers":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.brokers"},"kafka.consumer_group.lag":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.consumer_group.lag"},"kafka.consumer_group.lag_sum":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.consumer_group.lag_sum"},"kafka.consumer_group.members":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.consumer_group.members"},"kafka.consumer_group.offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.consumer_group.offset"},"kafka.consumer_group.offset_sum":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.consumer_group.offset_sum"},"kafka.partition.current_offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.partition.current_offset"},"kafka.partition.oldest_offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.partition.oldest_offset"},"kafka.partition.replicas":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.partition.replicas"},"kafka.partition.replicas_in_sync":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.partition.replicas_in_sync"},"kafka.topic.partitions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.internal.metadata.MetricConfig","title":"kafka.topic.partitions"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkareceiver.AutoCommit":{"additionalProperties":false,"properties":{"enable":{"description":"Whether or not to auto-commit updated offsets back to the broker.\n(default enabled).","title":"enable","type":"boolean"},"interval":{"description":"How frequently to commit updated offsets. 
Ineffective unless\nauto-commit is enabled (default 1s)","title":"interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkareceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for Kafka receiver.","properties":{"auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Authentication","title":"auth"},"autocommit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkareceiver.AutoCommit","description":"Controls the auto-commit functionality","title":"autocommit"},"brokers":{"description":"The list of kafka brokers (default localhost:9092)","items":{"type":"string"},"title":"brokers","type":"array"},"client_id":{"description":"The consumer client ID that receiver will use (default \"otel-collector\")","title":"client_id","type":"string"},"encoding":{"description":"Encoding of the messages (default \"otlp_proto\")","title":"encoding","type":"string"},"group_id":{"description":"The consumer group that receiver will be consuming messages from (default \"otel-collector\")","title":"group_id","type":"string"},"initial_offset":{"description":"The initial offset to use if no offset was previously committed.\nMust be `latest` or `earliest` (default \"latest\").","title":"initial_offset","type":"string"},"message_marking":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkareceiver.MessageMarking","description":"Controls the way the messages are marked as consumed","title":"message_marking"},"metadata":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Metadata","description":"Metadata is the namespace for metadata management properties used by the\nClient, and shared by the Producer/Consumer.","title":"metadata"},"protocol_version":{"description":"Kafka protocol version","title":"protocol_version","type":"string"},"topic":{"description":"The name of the kafka topic to consume from (default \"otlp_spans\")","title":"topic","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkareceiver.MessageMarking":{"additionalProperties":false,"properties":{"after":{"description":"If true, the messages are marked after the pipeline execution","title":"after","type":"boolean"},"on_error":{"description":"If false, only the successfully processed messages are marked, it has no impact if\nAfter is set to false.\nNote: this can block the entire partition in case a message processing returns\na permanent error.","title":"on_error","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Kubelet Stats Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Kubelet Stats Receiver pulls pod metrics from the API 
server on a kubelet\nand sends them down the metric pipeline for further processing.\n\n## Configuration\n\nA kubelet runs on a Kubernetes node and has an API server to which this\nreceiver connects. To configure this receiver, you have to tell it how\nto connect and authenticate to the API server and how often to collect data\nand send it to the next consumer.\n\nThe Kubelet Stats Receiver supports both the secure Kubelet endpoint, exposed at port 10250 by default, and the read-only\nKubelet endpoint, exposed at port 10255. If `auth_type` is set to `none`, the read-only endpoint will be used. The secure\nendpoint will be used if `auth_type` is set to any of the following values:\n\n- `tls` tells the receiver to use TLS for auth and requires that the fields\n`ca_file`, `key_file`, and `cert_file` also be set.\n- `serviceAccount` tells this receiver to use the default service account token\nto authenticate to the kubelet API.\n- `kubeConfig` tells this receiver to use the kubeconfig file (KUBECONFIG env variable or ~/.kube/config)\nto authenticate and use the API server proxy to access the kubelet API.\n\nIn addition, `initial_delay` (default = `1s`) defines how long this receiver waits before starting.\n\n### TLS Example\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 20s\n initial_delay: 1s\n auth_type: \"tls\"\n ca_file: \"/path/to/ca.crt\"\n key_file: \"/path/to/apiserver.key\"\n cert_file: \"/path/to/apiserver.crt\"\n endpoint: \"https://192.168.64.1:10250\"\n insecure_skip_verify: true\nexporters:\n file:\n path: \"fileexporter.txt\"\nservice:\n pipelines:\n metrics:\n receivers: [kubeletstats]\n exporters: [file]\n```\n\n### Service Account Authentication Example\n\nAlthough it's possible to use Kubernetes' hostNetwork feature to talk to the\nkubelet API from a pod, the preferred approach is to use the downward API.\n\nMake sure the pod spec sets the node name as follows:\n\n```yaml\nenv:\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n```\n\nThen the otel config can reference the `K8S_NODE_NAME` environment variable:\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 20s\n auth_type: \"serviceAccount\"\n endpoint: \"https://${env:K8S_NODE_NAME}:10250\"\n insecure_skip_verify: true\nexporters:\n file:\n path: \"fileexporter.txt\"\nservice:\n pipelines:\n metrics:\n receivers: [kubeletstats]\n exporters: [file]\n```\n\nNote: a missing or empty `endpoint` will cause the hostname on which the\ncollector is running to be used as the endpoint. If the hostNetwork flag is\nset, and the collector is running in a pod, this hostname will resolve to the\nnode's network namespace.\n
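\nDepending on how the cluster is configured, the service account used by the collector typically also needs RBAC permission to read the kubelet's stats endpoints. The following is only a sketch, assuming the kubelet authorizes requests against the common `nodes/stats` and `nodes/proxy` subresources; adjust the names and rules to your environment:\n\n```yaml\n# illustrative ClusterRole; the required subresources depend on your kubelet authorization setup\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: otel-kubeletstats\nrules:\n  - apiGroups: [\"\"]\n    resources: [nodes/stats, nodes/proxy]\n    verbs: [get]\n```\n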
\n### Read Only Endpoint Example\n\nThe following config can be used to collect Kubelet metrics from the read-only endpoint:\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 20s\n auth_type: \"none\"\n endpoint: \"http://${env:K8S_NODE_NAME}:10255\"\nexporters:\n file:\n path: \"fileexporter.txt\"\nservice:\n pipelines:\n metrics:\n receivers: [kubeletstats]\n exporters: [file]\n```\n\n### Kubeconfig example\n\nThe following config can be used to collect Kubelet metrics via the API server proxy:\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 20s\n auth_type: \"kubeConfig\"\n insecure_skip_verify: true\n endpoint: \"${env:K8S_NODE_NAME}\"\nexporters:\n file:\n path: \"fileexporter.txt\"\nservice:\n pipelines:\n metrics:\n receivers: [kubeletstats]\n exporters: [file]\n```\nNote that when using `auth_type` `kubeConfig`, the endpoint should only be the node name, as the communication to the kubelet is proxied by the API server configured in the `kubeConfig`.\n`insecure_skip_verify` still applies by overriding the `kubeConfig` settings.\n\n### Extra metadata labels\n\nBy default, all produced metrics get resource labels based on what the kubelet /stats/summary endpoint provides.\nFor some use cases this might not be enough, so it's possible to leverage other endpoints to fetch\nadditional metadata entities and set them as extra labels on the metric resource. Currently supported metadata\nincludes the following:\n\n- `container.id` - to augment metrics with a Container ID label obtained from container statuses exposed via `/pods`.\n- `k8s.volume.type` - to collect the volume type from the Pod spec exposed via `/pods` and have it as a label on volume metrics.\nIf there's more information available from the endpoint than just the volume type, it is synced as well depending on\nthe available fields and the type of volume. For example, `aws.volume.id` would be synced from `awsElasticBlockStore`\nand `gce.pd.name` is synced for `gcePersistentDisk`.\n\nIf you want to have the `container.id` label added to your metrics, use the `extra_metadata_labels` field to enable\nit, for example:\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 10s\n auth_type: \"serviceAccount\"\n endpoint: \"${env:K8S_NODE_NAME}:10250\"\n insecure_skip_verify: true\n extra_metadata_labels:\n - container.id\n```\n\nIf `extra_metadata_labels` is not set, no additional API calls are made to fetch extra metadata.\n\n#### Collecting Additional Volume Metadata\n\nWhen dealing with Persistent Volume Claims, it is possible to optionally sync metadata from the underlying\nstorage resource rather than just the volume claim. This is achieved by talking to the Kubernetes API. Below\nis an example configuration to achieve this.\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 10s\n auth_type: \"serviceAccount\"\n endpoint: \"${env:K8S_NODE_NAME}:10250\"\n insecure_skip_verify: true\n extra_metadata_labels:\n - k8s.volume.type\n k8s_api_config:\n auth_type: serviceAccount\n```\n\nIf `k8s_api_config` is set, the receiver will attempt to collect metadata from the underlying storage resources for\nPersistent Volume Claims. 
For example, if a Pod is using a PVC backed by an EBS instance on AWS, the receiver\nwould set the `k8s.volume.type` label to be `awsElasticBlockStore` rather than `persistentVolumeClaim`.\n\n### Metric Groups\n\nA list of metric groups from which metrics should be collected. By default, metrics from containers,\npods and nodes will be collected. If `metric_groups` is set, only metrics from the listed groups\nwill be collected. Valid groups are `container`, `pod`, `node` and `volume`. For example, if you're\nlooking to collect only `node` and `pod` metrics from the receiver use the following configuration.\n\n```yaml\nreceivers:\n kubeletstats:\n collection_interval: 10s\n auth_type: \"serviceAccount\"\n endpoint: \"${env:K8S_NODE_NAME}:10250\"\n insecure_skip_verify: true\n metric_groups:\n - node\n - pod\n```\n\n### Optional parameters\n\nThe following parameters can also be specified:\n\n- `collection_interval` (default = `10s`): The interval at which to collect data.\n- `insecure_skip_verify` (default = `false`): Whether or not to skip certificate verification.\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml) with further documentation in [documentation.md](./documentation.md)","properties":{"auth_type":{"description":"How to authenticate to the K8s API server. This can be one of `none`\n(for no auth), `serviceAccount` (to use the standard service account\ntoken provided to the agent pod), or `kubeConfig` to use credentials\nfrom `~/.kube/config`.","title":"auth_type","type":"string"},"ca_file":{"description":"Path to the CA cert. For a client this verifies the server certificate.\nFor a server this verifies client certificates. If empty uses system root CA.\n(optional)","title":"ca_file","type":"string"},"ca_pem":{"description":"In memory PEM encoded cert. (optional)","title":"ca_pem","type":"string"},"cert_file":{"description":"Path to the TLS cert to use for TLS required connections. (optional)","title":"cert_file","type":"string"},"cert_pem":{"description":"In memory PEM encoded TLS cert to use for TLS required connections. (optional)","title":"cert_pem","type":"string"},"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". 
The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"extra_metadata_labels":{"description":"ExtraMetadataLabels contains list of extra metadata that should be taken from /pods endpoint\nand put as extra labels on metrics resource.\nNo additional metadata is fetched by default, so there are no extra calls to /pods endpoint.\nSupported values include container.id and k8s.volume.type.","items":{"type":"string"},"title":"extra_metadata_labels","type":"array"},"initial_delay":{"title":"initial_delay","type":"string"},"insecure_skip_verify":{"description":"InsecureSkipVerify controls whether the client verifies the server's\ncertificate chain and host name.","title":"insecure_skip_verify","type":"boolean"},"k8s_api_config":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.k8sconfig.APIConfig","description":"Configuration of the Kubernetes API client.","title":"k8s_api_config"},"key_file":{"description":"Path to the TLS key to use for TLS required connections. (optional)","title":"key_file","type":"string"},"key_pem":{"description":"In memory PEM encoded TLS key to use for TLS required connections. (optional)","title":"key_pem","type":"string"},"max_version":{"description":"MaxVersion sets the maximum TLS version that is acceptable.\nIf not set, refer to crypto/tls for defaults. (optional)","title":"max_version","type":"string"},"metric_groups":{"description":"MetricGroupsToCollect provides a list of metrics groups to collect metrics from.\n\"container\", \"pod\", \"node\" and \"volume\" are the only valid groups.","items":{"type":"string"},"title":"metric_groups","type":"array"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricsConfig","title":"metrics"},"min_version":{"description":"MinVersion sets the minimum TLS version that is acceptable.\nIf not set, TLS 1.2 will be used. 
(optional)","title":"min_version","type":"string"},"reload_interval":{"description":"ReloadInterval specifies the duration after which the certificate will be reloaded\nIf not set, it will never be reloaded (optional)","title":"reload_interval","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for kubeletstats metrics.","properties":{"container.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.time"},"container.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.cpu.utilization"},"container.filesystem.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.filesystem.available"},"container.filesystem.capacity":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.filesystem.capacity"},"container.filesystem.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.filesystem.usage"},"container.memory.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.available"},"container.memory.major_page_faults":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.major_page_faults"},"container.memory.page_faults":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.page_faults"},"container.memory.rss":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.rss"},"container.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.usage"},"container.memory.working_set":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"container.memory.working_set"},"k8s.node.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.cpu.time"},"k8s.node.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-cont
rib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.cpu.utilization"},"k8s.node.filesystem.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.filesystem.available"},"k8s.node.filesystem.capacity":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.filesystem.capacity"},"k8s.node.filesystem.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.filesystem.usage"},"k8s.node.memory.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.memory.available"},"k8s.node.memory.major_page_faults":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.memory.major_page_faults"},"k8s.node.memory.page_faults":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.memory.page_faults"},"k8s.node.memory.rss":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.memory.rss"},"k8s.node.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.memory.usage"},"k8s.node.memory.working_set":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.memory.working_set"},"k8s.node.network.errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.network.errors"},"k8s.node.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.node.network.io"},"k8s.pod.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.cpu.time"},"k8s.pod.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.cpu.utilization"},"k8s.pod.filesystem.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.filesystem.available"},"k8s.pod.filesystem.capacity":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.filesystem.capacity"},"k8s.pod.filesystem.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.filesystem.usage"},"k8s.pod.memory.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.memory.available"},"k8s.pod.memory.major_page_faults
":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.memory.major_page_faults"},"k8s.pod.memory.page_faults":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.memory.page_faults"},"k8s.pod.memory.rss":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.memory.rss"},"k8s.pod.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.memory.usage"},"k8s.pod.memory.working_set":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.memory.working_set"},"k8s.pod.network.errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.network.errors"},"k8s.pod.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.pod.network.io"},"k8s.volume.available":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.volume.available"},"k8s.volume.capacity":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.volume.capacity"},"k8s.volume.inodes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.volume.inodes"},"k8s.volume.inodes.free":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.volume.inodes.free"},"k8s.volume.inodes.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.MetricConfig","title":"k8s.volume.inodes.used"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for kubeletstats resource 
attributes.","properties":{"aws.volume.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"aws.volume.id"},"container.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"container.id"},"fs.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"fs.type"},"gce.pd.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"gce.pd.name"},"glusterfs.endpoints.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"glusterfs.endpoints.name"},"glusterfs.path":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"glusterfs.path"},"k8s.container.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.container.name"},"k8s.namespace.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.namespace.name"},"k8s.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.node.name"},"k8s.persistentvolumeclaim.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.persistentvolumeclaim.name"},"k8s.pod.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.pod.name"},"k8s.pod.uid":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.pod.uid"},"k8s.volume.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.volume.name"},"k8s.volume.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"k8s.volume.type"},"partition":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.internal.metadata.ResourceAttributeConfig","title":"partition"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.lokireceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the lokireceiver receiver.","properties":{"protocols":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.lokireceiver.Protocols","title":"protocols"},"use_incoming_timestamp":{"title":"use_incoming_timestamp","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.lokireceiver.Protocols":{"additionalProperties":false,"description":"Protocols is the configuration for the 
supported protocols.","properties":{"grpc":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCServerSettings","title":"grpc"},"http":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings","title":"http"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Memcached Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver can fetch stats from a Memcached instance using the [stats\ncommand](https://github.com/memcached/memcached/wiki/Commands#statistics). A\ndetailed description of all the stats available is at\nhttps://github.com/memcached/memcached/blob/master/doc/protocol.txt#L1159.\n\n## Details\n\n## Configuration\n\n\u003e :information_source: This receiver is in beta and configuration fields are subject to change.\n\nThe following settings are required:\n\n- `endpoint` (default: `localhost:11211`): The hostname/IP address and port or, unix socket file path of the memcached instance\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): This receiver runs on an interval.\nEach time it runs, it queries memcached, creates metrics, and sends them to the\nnext consumer. The `collection_interval` configuration option tells this\nreceiver the duration between runs. This value must be a string readable by\nGolang's `ParseDuration` function (example: `1h30m`). Valid time units are\n`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\nExample:\n\n```yaml\nreceivers:\n memcached:\n endpoint: \"localhost:11211\"\n collection_interval: 10s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml) with further documentation in [documentation.md](./documentation.md)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". 
The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricsConfig","title":"metrics"},"timeout":{"description":"Timeout for the memcache stats request","title":"timeout","type":"string"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for memcached metrics.","properties":{"memcached.bytes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.bytes"},"memcached.commands":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.commands"},"memcached.connections.current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.connections.current"},"memcached.connections.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.connections.total"},"memcached.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.cpu.usage"},"memcached.current_items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.current_items"},"memcached.evictions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.evictions"},"memcached.network":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.network"},"memcached.operation_hit_ratio":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.operation_hit_ratio"},"memcached.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.operations"},"memcached.threads":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.internal.metadata.MetricConfig","title":"memcached.threads"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.AccessLogsConfig":{"additionalProperties":fa
lse,"properties":{"auth_result":{"title":"auth_result","type":"boolean"},"enabled":{"title":"enabled","type":"boolean"},"max_pages":{"title":"max_pages","type":"integer"},"page_size":{"title":"page_size","type":"integer"},"poll_interval":{"title":"poll_interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.AlertConfig":{"additionalProperties":false,"properties":{"enabled":{"title":"enabled","type":"boolean"},"endpoint":{"title":"endpoint","type":"string"},"max_pages":{"title":"max_pages","type":"integer"},"mode":{"title":"mode","type":"string"},"page_size":{"title":"page_size","type":"integer"},"poll_interval":{"title":"poll_interval","type":"string"},"projects":{"description":"these parameters are only relevant in retrieval mode","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.ProjectConfig"},"title":"projects","type":"array"},"secret":{"title":"secret","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.Config":{"additionalProperties":false,"properties":{"alerts":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.AlertConfig","title":"alerts"},"collection_interval":{"title":"collection_interval","type":"string"},"events":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.EventsConfig","title":"events"},"granularity":{"title":"granularity","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.LogConfig","title":"logs"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricsConfig","title":"metrics"},"private_key":{"title":"private_key","type":"string"},"public_key":{"title":"public_key","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"storage":{"title":"storage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.EventsConfig":{"additionalProperties":false,"description":"EventsConfig is the configuration options for events 
collection","properties":{"max_pages":{"title":"max_pages","type":"integer"},"organizations":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.OrgConfig"},"title":"organizations","type":"array"},"page_size":{"title":"page_size","type":"integer"},"poll_interval":{"title":"poll_interval","type":"string"},"projects":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.ProjectConfig"},"title":"projects","type":"array"},"types":{"items":{"type":"string"},"title":"types","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.LogConfig":{"additionalProperties":false,"properties":{"enabled":{"title":"enabled","type":"boolean"},"projects":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.LogsProjectConfig"},"title":"projects","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.LogsProjectConfig":{"additionalProperties":false,"properties":{"access_logs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.AccessLogsConfig","title":"access_logs"},"collect_audit_logs":{"title":"collect_audit_logs","type":"boolean"},"collect_host_logs":{"title":"collect_host_logs","type":"boolean"},"exclude_clusters":{"items":{"type":"string"},"title":"exclude_clusters","type":"array"},"include_clusters":{"items":{"type":"string"},"title":"include_clusters","type":"array"},"name":{"title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.OrgConfig":{"additionalProperties":false,"properties":{"id":{"title":"id","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.ProjectConfig":{"additionalProperties":false,"properties":{"exclude_clusters":{"items":{"type":"string"},"title":"exclude_clusters","type":"array"},"include_clusters":{"items":{"type":"string"},"title":"include_clusters","type":"array"},"name":{"title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for mongodbatlas 
metrics.","properties":{"mongodbatlas.db.counts":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.db.counts"},"mongodbatlas.db.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.db.size"},"mongodbatlas.disk.partition.iops.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.iops.average"},"mongodbatlas.disk.partition.iops.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.iops.max"},"mongodbatlas.disk.partition.latency.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.latency.average"},"mongodbatlas.disk.partition.latency.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.latency.max"},"mongodbatlas.disk.partition.space.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.space.average"},"mongodbatlas.disk.partition.space.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.space.max"},"mongodbatlas.disk.partition.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.usage.average"},"mongodbatlas.disk.partition.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.usage.max"},"mongodbatlas.disk.partition.utilization.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.utilization.average"},"mongodbatlas.disk.partition.utilization.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.disk.partition.utilization.max"},"mongodbatlas.process.asserts":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.asserts"},"mongodbatlas.process.background_flush":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.background_flush"},"mongodbatlas.process.cache.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cache.io"},"mongodbatlas.process.cache.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig",
"title":"mongodbatlas.process.cache.size"},"mongodbatlas.process.connections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.connections"},"mongodbatlas.process.cpu.children.normalized.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.children.normalized.usage.average"},"mongodbatlas.process.cpu.children.normalized.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.children.normalized.usage.max"},"mongodbatlas.process.cpu.children.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.children.usage.average"},"mongodbatlas.process.cpu.children.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.children.usage.max"},"mongodbatlas.process.cpu.normalized.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.normalized.usage.average"},"mongodbatlas.process.cpu.normalized.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.normalized.usage.max"},"mongodbatlas.process.cpu.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.usage.average"},"mongodbatlas.process.cpu.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cpu.usage.max"},"mongodbatlas.process.cursors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.cursors"},"mongodbatlas.process.db.document.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.db.document.rate"},"mongodbatlas.process.db.operations.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.db.operations.rate"},"mongodbatlas.process.db.operations.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.db.operations.time"},"mongodbatlas.process.db.query_executor.scanned":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.db.query_executor.scanned"},"mongodbatlas.process.db.query_targeting.scanned_per_returned":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title
":"mongodbatlas.process.db.query_targeting.scanned_per_returned"},"mongodbatlas.process.db.storage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.db.storage"},"mongodbatlas.process.global_lock":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.global_lock"},"mongodbatlas.process.index.btree_miss_ratio":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.index.btree_miss_ratio"},"mongodbatlas.process.index.counters":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.index.counters"},"mongodbatlas.process.journaling.commits":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.journaling.commits"},"mongodbatlas.process.journaling.data_files":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.journaling.data_files"},"mongodbatlas.process.journaling.written":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.journaling.written"},"mongodbatlas.process.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.memory.usage"},"mongodbatlas.process.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.network.io"},"mongodbatlas.process.network.requests":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.network.requests"},"mongodbatlas.process.oplog.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.oplog.rate"},"mongodbatlas.process.oplog.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.oplog.time"},"mongodbatlas.process.page_faults":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.page_faults"},"mongodbatlas.process.restarts":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.restarts"},"mongodbatlas.process.tickets":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.process.tickets"},"mongodbatlas.system.cpu.normalized.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricCon
fig","title":"mongodbatlas.system.cpu.normalized.usage.average"},"mongodbatlas.system.cpu.normalized.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.cpu.normalized.usage.max"},"mongodbatlas.system.cpu.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.cpu.usage.average"},"mongodbatlas.system.cpu.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.cpu.usage.max"},"mongodbatlas.system.fts.cpu.normalized.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.fts.cpu.normalized.usage"},"mongodbatlas.system.fts.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.fts.cpu.usage"},"mongodbatlas.system.fts.disk.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.fts.disk.used"},"mongodbatlas.system.fts.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.fts.memory.usage"},"mongodbatlas.system.memory.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.memory.usage.average"},"mongodbatlas.system.memory.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.memory.usage.max"},"mongodbatlas.system.network.io.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.network.io.average"},"mongodbatlas.system.network.io.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.network.io.max"},"mongodbatlas.system.paging.io.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.paging.io.average"},"mongodbatlas.system.paging.io.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.paging.io.max"},"mongodbatlas.system.paging.usage.average":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.paging.usage.average"},"mongodbatlas.system.paging.usage.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.MetricConfig","title":"mongodbatlas.system.paging.usage.max"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasrece
iver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for mongodbatlas resource attributes.","properties":{"mongodb_atlas.cluster.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.cluster.name"},"mongodb_atlas.db.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.db.name"},"mongodb_atlas.disk.partition":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.disk.partition"},"mongodb_atlas.host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.host.name"},"mongodb_atlas.org_name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.org_name"},"mongodb_atlas.process.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.process.id"},"mongodb_atlas.process.port":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.process.port"},"mongodb_atlas.process.type_name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.process.type_name"},"mongodb_atlas.project.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.project.id"},"mongodb_atlas.project.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.project.name"},"mongodb_atlas.user.alias":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.internal.metadata.ResourceAttributeConfig","title":"mongodb_atlas.user.alias"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.Config":{"additionalProperties":false,"markdownDescription":"# MongoDB Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: 
https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches stats from a MongoDB instance using the [golang\nmongo driver](https://github.com/mongodb/mongo-go-driver). Stats are collected\nvia MongoDB's `dbStats` and `serverStatus` commands.\n\n## Purpose\n\nThe purpose of this receiver is to allow users to monitor metrics from standalone MongoDB clusters. This includes non-Atlas managed MongoDB Servers.\n\n## Prerequisites\n\nThis receiver supports MongoDB versions:\n\n- 4.0+\n- 5.0\n\nMongoDB recommends setting up a least privilege user (LPU) with a [`clusterMonitor` role](https://www.mongodb.com/docs/v5.0/reference/built-in-roles/#mongodb-authrole-clusterMonitor) in order to collect metrics. Please refer to [lpu.sh](./testdata/integration/scripts/lpu.sh) for an example of how to configure these permissions.\n\n## Configuration\n\nThe following settings are optional:\n\n- `hosts` (default: [`localhost:27017`]): list of `host:port` or unix domain socket endpoints.\n - For standalone MongoDB deployments this is the hostname and port of the mongod instance\n - For replica sets specify the hostnames and ports of the mongod instances that are in the replica set configuration. If the `replica_set` field is specified, nodes will be autodiscovered.\n - For a sharded MongoDB deployment, please specify a list of the `mongos` hosts.\n- `username`: If authentication is required, the user with `clusterMonitor` permissions can be provided here.\n- `password`: If authentication is required, the password can be provided here.\n- `collection_interval`: (default = `1m`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `replica_set`: If the deployment of MongoDB is a replica set then this allows users to specify the replica set name which allows for autodiscovery of other nodes in the replica set.\n- `timeout`: (default = `1m`) The timeout of running commands against mongo.\n- `tls`: (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. 
By default insecure settings are rejected and certificate verification is on.\n\n### Example Configuration\n\n```yaml\nreceivers:\n mongodb:\n hosts:\n - endpoint: localhost:27017\n username: otel\n password: ${env:MONGODB_PASSWORD}\n collection_interval: 60s\n initial_delay: 1s\n tls:\n insecure: true\n insecure_skip_verify: true\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nThe following metrics are available with versions:\n\n- `mongodb.extent.count` \u003c 4.4 with mmapv1 storage engine\n- `mongodb.session.count` \u003e= 3.0 with wiredTiger storage engine\n- `mongodb.cache.operations` \u003e= 3.0 with wiredTiger storage engine\n- `mongodb.connection.count` with attribute `active` is available \u003e= 4.0\n- `mongodb.index.access.count` \u003e= 4.0\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"hosts":{"items":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confignet.NetAddr"},"title":"hosts","type":"array"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"replica_set":{"title":"replica_set","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for mongodb 
metrics.","properties":{"mongodb.cache.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.cache.operations"},"mongodb.collection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.collection.count"},"mongodb.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.connection.count"},"mongodb.cursor.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.cursor.count"},"mongodb.cursor.timeout.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.cursor.timeout.count"},"mongodb.data.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.data.size"},"mongodb.database.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.database.count"},"mongodb.document.operation.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.document.operation.count"},"mongodb.extent.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.extent.count"},"mongodb.global_lock.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.global_lock.time"},"mongodb.health":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.health"},"mongodb.index.access.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.index.access.count"},"mongodb.index.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.index.count"},"mongodb.index.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.index.size"},"mongodb.lock.acquire.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.lock.acquire.count"},"mongodb.lock.acquire.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.lock.acquire.time"},"mongodb.lock.acquire.wait_count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.lock.acquire.wait_count"},"mongodb.lock.deadlock.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.lock.deadlock.count"},"mongodb.memory.usage":{"$ref":"#/$defs/git
hub.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.memory.usage"},"mongodb.network.io.receive":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.network.io.receive"},"mongodb.network.io.transmit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.network.io.transmit"},"mongodb.network.request.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.network.request.count"},"mongodb.object.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.object.count"},"mongodb.operation.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.operation.count"},"mongodb.operation.latency.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.operation.latency.time"},"mongodb.operation.repl.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.operation.repl.count"},"mongodb.operation.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.operation.time"},"mongodb.session.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.session.count"},"mongodb.storage.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.storage.size"},"mongodb.uptime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.MetricConfig","title":"mongodb.uptime"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for mongodb resource attributes.","properties":{"database":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.internal.metadata.ResourceAttributeConfig","title":"database"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.Config":{"additionalProperties":false,"properties":{"allow_native_passwords":{"title":"allow_native_passwords","type":"boolean"},"collection_interval":{"title":"collection_interval","type":"string"},"database":{"title":"database","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the 
form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"statement_events":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.StatementEventsConfig","title":"statement_events"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.StatementEventsConfig":{"additionalProperties":false,"properties":{"digest_text_limit":{"title":"digest_text_limit","type":"integer"},"limit":{"title":"limit","type":"integer"},"time_limit":{"title":"time_limit","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for mysql 
metrics.","properties":{"mysql.buffer_pool.data_pages":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.buffer_pool.data_pages"},"mysql.buffer_pool.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.buffer_pool.limit"},"mysql.buffer_pool.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.buffer_pool.operations"},"mysql.buffer_pool.page_flushes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.buffer_pool.page_flushes"},"mysql.buffer_pool.pages":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.buffer_pool.pages"},"mysql.buffer_pool.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.buffer_pool.usage"},"mysql.client.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.client.network.io"},"mysql.commands":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.commands"},"mysql.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.connection.count"},"mysql.connection.errors":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.connection.errors"},"mysql.double_writes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.double_writes"},"mysql.handlers":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.handlers"},"mysql.index.io.wait.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.index.io.wait.count"},"mysql.index.io.wait.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.index.io.wait.time"},"mysql.joins":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.joins"},"mysql.locked_connects":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.locked_connects"},"mysql.locks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.locks"},"mysql.log_operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.log_operations"},"mysql.mysqlx_connections":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig",
"title":"mysql.mysqlx_connections"},"mysql.mysqlx_worker_threads":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.mysqlx_worker_threads"},"mysql.opened_resources":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.opened_resources"},"mysql.operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.operations"},"mysql.page_operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.page_operations"},"mysql.prepared_statements":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.prepared_statements"},"mysql.query.client.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.query.client.count"},"mysql.query.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.query.count"},"mysql.query.slow.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.query.slow.count"},"mysql.replica.sql_delay":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.replica.sql_delay"},"mysql.replica.time_behind_source":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.replica.time_behind_source"},"mysql.row_locks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.row_locks"},"mysql.row_operations":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.row_operations"},"mysql.sorts":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.sorts"},"mysql.statement_event.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.statement_event.count"},"mysql.statement_event.wait.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.statement_event.wait.time"},"mysql.table.io.wait.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table.io.wait.count"},"mysql.table.io.wait.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table.io.wait.time"},"mysql.table.lock_wait.read.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table.lock_wait.read.count"},"mysql.table.lock_wait.read.time":{"$ref":"#/$defs/github.com.open-telemetry.open
telemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table.lock_wait.read.time"},"mysql.table.lock_wait.write.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table.lock_wait.write.count"},"mysql.table.lock_wait.write.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table.lock_wait.write.time"},"mysql.table_open_cache":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.table_open_cache"},"mysql.threads":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.threads"},"mysql.tmp_resources":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.tmp_resources"},"mysql.uptime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.MetricConfig","title":"mysql.uptime"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for mysql resource attributes.","properties":{"mysql.instance.endpoint":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.internal.metadata.ResourceAttributeConfig","title":"mysql.instance.endpoint"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Nginx Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver can fetch stats from a Nginx instance using the `ngx_http_stub_status_module` module's `status` endpoint.\n\n## Details\n\n## Configuration\n\n### Nginx Module\nYou must configure NGINX to expose status information by editing the NGINX\nconfiguration. 
Please see\n[ngx_http_stub_status_module](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html)\nfor a guide to configuring the NGINX stats module `ngx_http_stub_status_module`.\n\n### Receiver Config\n\n\u003e :information_source: This receiver is in beta and configuration fields are subject to change.\n\nThe following settings are required:\n\n- `endpoint` (default: `http://localhost:80/status`): The URL of the nginx status endpoint\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): This receiver runs on an interval.\nEach time it runs, it queries nginx, creates metrics, and sends them to the\nnext consumer. The `collection_interval` configuration option tells this\nreceiver the duration between runs. This value must be a string readable by\nGolang's `ParseDuration` function (example: `1h30m`). Valid time units are\n`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\nExample:\n\n```yaml\nreceivers:\n nginx:\n endpoint: \"http://localhost:80/status\"\n collection_interval: 10s\n```\n\nThe full list of settings exposed for this receiver is documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Feature gate configurations\n\nSee the [Collector feature gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md#collector-feature-gates) for an overview of feature gates in the collector.\n\n**ALPHA**: `receiver.nginx.emitConnectionsCurrentAsSum`\n\nThe feature gate `receiver.nginx.emitConnectionsCurrentAsSum`, once enabled, will change the data type of the\n`nginx.connections_current` metric from a gauge to a non-monotonic sum.\n\nThis feature gate will eventually be enabled by default, and eventually the old implementation will be removed. It aims\nto give users time to migrate to the new implementation. 
The target release for this featuregate to be enabled by default\nis 0.80.0.","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricsConfig","title":"metrics"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for nginx metrics.","properties":{"nginx.connections_accepted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricConfig","title":"nginx.connections_accepted"},"nginx.connections_current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricConfig","title":"nginx.connections_current"},"nginx.connections_handled":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricConfig","title":"nginx.connections_handled"},"nginx.requests":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricConfig","title":"nginx.requests"},"temp.connections_current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.internal.metadata.MetricConfig","title":"temp.connections_current"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.Config":{"additionalProperties":false,"description":"Config is the configuration for the NSX receiver","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and 
we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"username":{"title":"username","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for nsxt metrics.","properties":{"nsxt.node.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.cpu.utilization"},"nsxt.node.filesystem.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.filesystem.usage"},"nsxt.node.filesystem.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.filesystem.utilization"},"nsxt.node.memory.cache.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.memory.cache.usage"},"nsxt.node.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.memory.usage"},"nsxt.node.network.io":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.network.io"},"nsxt.node.network.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.MetricConfig","title":"nsxt.node.network.packet.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig 
provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for nsxt resource attributes.","properties":{"device.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributeConfig","title":"device.id"},"nsxt.node.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributeConfig","title":"nsxt.node.id"},"nsxt.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributeConfig","title":"nsxt.node.name"},"nsxt.node.type":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.internal.metadata.ResourceAttributeConfig","title":"nsxt.node.type"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.opencensusreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for OpenCensus receiver.","markdownDescription":"# OpenCensus Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics, traces |\n| Distributions | [core], [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nReceives data via gRPC or HTTP using [OpenCensus]( https://opencensus.io/)\nformat.\n\n## Getting Started\n\nAll that is required to enable the OpenCensus receiver is to include it in the\nreceiver definitions.\n\n```yaml\nreceivers:\n opencensus:\n```\n\nThe following settings are configurable:\n\n- `endpoint` (default = 0.0.0.0:55678): host:port to which the receiver is\n going to receive data. The valid syntax is described at\n https://github.com/grpc/grpc/blob/master/doc/naming.md.\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) including CORS\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)\n\n## Writing with HTTP/JSON\n\nThe OpenCensus receiver can receive trace export calls via HTTP/JSON in\naddition to gRPC. The HTTP/JSON address is the same as gRPC as the protocol is\nrecognized and processed accordingly.\n\nTo write traces with HTTP/JSON, `POST` to `[address]/v1/trace`. 
The JSON message\nformat parallels the gRPC protobuf format, see this\n[OpenApi spec for it](https://github.com/census-instrumentation/opencensus-proto/blob/master/gen-openapi/opencensus/proto/agent/trace/v1/trace_service.swagger.json).\n\nThe HTTP/JSON endpoint can also optionally configure\n[CORS](https://fetch.spec.whatwg.org/#cors-protocol), which is enabled by\nspecifying a list of allowed CORS origins in the `cors_allowed_origins` field:\n\n```yaml\nreceivers:\n opencensus:\n cors_allowed_origins:\n - http://test.com\n # Origins can have wildcards with *, use * by itself to match any origin.\n - https://*.example.com\n```","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors_allowed_origins":{"description":"CorsOrigins are the allowed CORS origins for HTTP/JSON requests to grpc-gateway adapter\nfor the OpenCensus receiver. See github.com/rs/cors\nAn empty list means that CORS is not enabled at all. A wildcard (*) can be\nused to match any origin or one or more characters of an origin.","items":{"type":"string"},"title":"cors_allowed_origins","type":"array"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"include_metadata":{"description":"Include propagates the incoming connection's metadata to downstream consumers.\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveServerConfig","description":"Keepalive anchor for all the settings related to keepalive.","title":"keepalive"},"max_concurrent_streams":{"description":"MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport.\nIt has effect only for streaming RPCs.","title":"max_concurrent_streams","type":"integer"},"max_recv_msg_size_mib":{"description":"MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server.","title":"max_recv_msg_size_mib","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for gRPC server. See grpc.ReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#ReadBufferSize).","title":"read_buffer_size","type":"integer"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"Configures the protocol to use TLS.\nThe default value is nil, which will cause the protocol to not use TLS.","title":"tls"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for gRPC server. 
See grpc.WriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Oracle DB receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [splunk] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver collects metrics from an Oracle Database.\n\nThe receiver connects to a database host and periodically performs queries.\n\n## Getting Started\n\nTo use the OracleDB receiver you must define how to connect to your DB. This can be done in two ways,\ndefined in the [Primary](#primary-configuration-option) and [Secondary](#secondary-configuration-option) configuration\noption sections. Defining one of the two configurations is required. If both are defined, the primary\noption will be used.\n\n### Primary Configuration Option\n\nRequired options:\n- `datasource`: Oracle database connection string. Special characters must be encoded. Refer to Oracle Go Driver go_ora documentation for full connection string options.\n\nExample:\n\n```yaml\nreceivers:\n oracledb:\n datasource: \"oracle://otel:password@localhost:51521/XE\"\n```\n\n### Secondary Configuration Option\n\nRequired options:\n- `endpoint`: Endpoint used to connect to the OracleDB server. Must be in the format of `host:port`\n- `password`: Password for the OracleDB connection. 
Special characters are allowed.\n- `service`: OracleDB Service that the receiver should connect to.\n- `username`: Username for the OracleDB connection.\n\nExample:\n```yaml\nreceivers:\n oracledb:\n endpoint: localhost:51521\n password: p@sswo%d\n service: XE\n username: otel\n```\n\n## Permissions\n\nDepending on which metrics you collect, you will need to assign those permissions to the database user:\n```\nGRANT SELECT ON V_$SESSION TO \u003cusername\u003e;\nGRANT SELECT ON V_$SYSSTAT TO \u003cusername\u003e;\nGRANT SELECT ON V_$RESOURCE_LIMIT TO \u003cusername\u003e;\nGRANT SELECT ON DBA_TABLESPACES TO \u003cusername\u003e;\nGRANT SELECT ON DBA_DATA_FILES TO \u003cusername\u003e;\n```\n\n## Enabling metrics.\n\nSee [documentation](./documentation.md).\n\nYou can enable or disable selective metrics.\n\nExample:\n\n```yaml\nreceivers:\n oracledb:\n datasource: \"oracle://otel:password@localhost:51521/XE\"\n metrics:\n oracledb.query.cpu_time:\n enabled: false\n oracledb.query.physical_read_requests:\n enabled: true\n```","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"datasource":{"title":"datasource","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"service":{"title":"service","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for oracledb 
metrics.","properties":{"oracledb.consistent_gets":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.consistent_gets"},"oracledb.cpu_time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.cpu_time"},"oracledb.db_block_gets":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.db_block_gets"},"oracledb.dml_locks.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.dml_locks.limit"},"oracledb.dml_locks.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.dml_locks.usage"},"oracledb.enqueue_deadlocks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.enqueue_deadlocks"},"oracledb.enqueue_locks.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.enqueue_locks.limit"},"oracledb.enqueue_locks.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.enqueue_locks.usage"},"oracledb.enqueue_resources.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.enqueue_resources.limit"},"oracledb.enqueue_resources.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.enqueue_resources.usage"},"oracledb.exchange_deadlocks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.exchange_deadlocks"},"oracledb.executions":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.executions"},"oracledb.hard_parses":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.hard_parses"},"oracledb.logical_reads":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.logical_reads"},"oracledb.parse_calls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.parse_calls"},"oracledb.pga_memory":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.pga_memory"},"oracledb.physical_reads":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.physical_reads"},"oracledb.processes.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.processes.limit"},"orac
ledb.processes.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.processes.usage"},"oracledb.sessions.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.sessions.limit"},"oracledb.sessions.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.sessions.usage"},"oracledb.tablespace_size.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.tablespace_size.limit"},"oracledb.tablespace_size.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.tablespace_size.usage"},"oracledb.transactions.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.transactions.limit"},"oracledb.transactions.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.transactions.usage"},"oracledb.user_commits":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.user_commits"},"oracledb.user_rollbacks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.MetricConfig","title":"oracledb.user_rollbacks"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for oracledb resource attributes.","properties":{"oracledb.instance.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.internal.metadata.ResourceAttributeConfig","title":"oracledb.instance.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.otlpjsonfilereceiver.Config":{"additionalProperties":false,"markdownDescription":"# OTLP JSON File Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: traces, metrics, logs |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver will read pipeline data from JSON files. 
The data is written in\n[Protobuf JSON\nencoding](https://developers.google.com/protocol-buffers/docs/proto3#json)\nusing [OpenTelemetry\nprotocol](https://github.com/open-telemetry/opentelemetry-proto).\n\nThe receiver will watch the directory and read files. If a file is updated or added,\nthe receiver will read it in its entirety again.\n\nPlease note that there is no guarantee that exact field names will remain stable.\nThis is intended primarily for debugging the Collector without setting up backends.\n\n## Getting Started\n\nThe following settings are required:\n\n- `include`: set a glob path of files to include in data collection\n\nExample:\n\n```yaml\nreceivers:\n otlpjsonfile:\n include:\n - \"/var/log/*.log\"\n exclude:\n - \"/var/log/example.log\"\n```","properties":{"delete_after_read":{"title":"delete_after_read","type":"boolean"},"encoding":{"title":"encoding","type":"string"},"exclude":{"items":{"type":"string"},"title":"exclude","type":"array"},"fingerprint_size":{"title":"fingerprint_size","type":"integer"},"force_flush_period":{"title":"force_flush_period","type":"string"},"header":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.HeaderConfig","title":"header"},"include":{"items":{"type":"string"},"title":"include","type":"array"},"include_file_name":{"title":"include_file_name","type":"boolean"},"include_file_name_resolved":{"title":"include_file_name_resolved","type":"boolean"},"include_file_path":{"title":"include_file_path","type":"boolean"},"include_file_path_resolved":{"title":"include_file_path_resolved","type":"boolean"},"max_batches":{"title":"max_batches","type":"integer"},"max_concurrent_files":{"title":"max_concurrent_files","type":"integer"},"max_log_size":{"title":"max_log_size","type":"integer"},"multiline":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.helper.MultilineConfig","title":"multiline"},"ordering_criteria":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.fileconsumer.OrderingCriteria","title":"ordering_criteria"},"poll_interval":{"title":"poll_interval","type":"string"},"preserve_leading_whitespaces":{"title":"preserve_leading_whitespaces","type":"boolean"},"preserve_trailing_whitespaces":{"title":"preserve_trailing_whitespaces","type":"boolean"},"start_at":{"title":"start_at","type":"string"},"storage":{"title":"storage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.podmanreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Podman Stats Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Podman Stats receiver queries the Podman service API to fetch stats for all running containers \non a configured interval. 
These stats are for container\nresource usage of cpu, memory, network, and the\n[blkio controller](https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt).\n\n\u003e :information_source: Requires Podman API version 3.3.1+ and Windows is not supported.\n\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `unix:///run/podman/podman.sock`): Address to reach the desired Podman daemon.\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): The interval at which to gather container stats.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `timeout` (default = `5s`): The maximum amount of time to wait for Podman API responses.\n\nExample:\n\n```yaml\nreceivers:\n podman_stats:\n endpoint: unix://run/podman/podman.sock\n timeout: 10s\n collection_interval: 10s\n initial_delay: 1s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n### Connecting over SSH\n\n```yaml\nreceivers:\n podman_stats:\n endpoint: ssh://core@localhost:53841/run/user/1000/podman/podman.sock\n ssh_key: /path/to/ssh/private/key\n ssh_passphrase: \u003cpassword\u003e\n```\n\n### Podman API compatibility\n\nThe receiver has only been tested with API 3.3.1+ but it may work with older versions as well. If you want to use the\nreceiver with an older API version, please set the `api_version` to the desired version. For example,\n\n```yaml\nreceivers:\n podman_stats:\n endpoint: unix://run/podman/podman.sock\n api_version: 3.2.0\n```\n## Metrics\n\nThe receiver emits the following metrics:\n\n\tcontainer.memory.usage.limit\n\tcontainer.memory.usage.total\n\tcontainer.memory.percent\n\tcontainer.network.io.usage.tx_bytes\n\tcontainer.network.io.usage.rx_bytes\n\tcontainer.blockio.io_service_bytes_recursive.write\n\tcontainer.blockio.io_service_bytes_recursive.read\n\tcontainer.cpu.usage.system\n\tcontainer.cpu.usage.total\n\tcontainer.cpu.percent\n\tcontainer.cpu.usage.percpu\n\n## Building\n\nThis receiver uses the official libpod Go bindings for Podman. In order to include\nthis receiver in your build, you'll need to make sure all non-Go dependencies are\nsatisfied or some features are excluded. You can use the build tags mentioned below to\nexclude the non-Go dependencies. This receiver does not use any features enabled\nby these deps, so excluding them does not affect the functionality in any way.\n\nRecommended build tags to use when including this receiver in your build:\n\n- `containers_image_openpgp`\n- `exclude_graphdriver_btrfs`\n- `exclude_graphdriver_devicemapper`","properties":{"api_version":{"title":"api_version","type":"string"},"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"The URL of the podman server. Default is \"unix:///run/podman/podman.sock\"","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"ssh_key":{"title":"ssh_key","type":"string"},"ssh_passphrase":{"title":"ssh_passphrase","type":"string"},"timeout":{"description":"The maximum amount of time to wait for Podman API responses. 
Default is 5s","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.Config":{"additionalProperties":false,"markdownDescription":"# PostgreSQL Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver queries the PostgreSQL [statistics collector](https://www.postgresql.org/docs/9.6/monitoring-stats.html).\n\n\u003e :construction: This receiver is in **BETA**. Configuration fields and metric data model are subject to change.\n\n## Prerequisites\n\nThis receiver supports PostgreSQL versions 9.6+.\n\nThe monitoring user must be granted `SELECT` on `pg_stat_database`.\n\n## Configuration\n\nThe following settings are required to create a database connection:\n\n- `username`\n- `password`\n\nThe following settings are optional:\n\n- `endpoint` (default = `localhost:5432`): The endpoint of the postgresql server. Whether using TCP or Unix sockets, this value should be `host:port`. If `transport` is set to `unix`, the endpoint will internally be translated from `host:port` to `/host.s.PGSQL.port`\n- `transport` (default = `tcp`): The transport protocol being used to connect to postgresql. Available options are `tcp` and `unix`.\n\n- `databases` (default = `[]`): The list of databases for which the receiver will attempt to collect statistics. If an empty list is provided, the receiver will attempt to collect statistics for all non-template databases.\n\nThe following settings are also optional and nested under `tls` to help configure client transport security:\n\n- `insecure` (default = `false`): Whether to enable client transport security for the postgresql connection.\n- `insecure_skip_verify` (default = `true`): Whether to validate server name and certificate if client transport security is enabled.\n- `cert_file` (default = `$HOME/.postgresql/postgresql.crt`): A certificate used for client authentication, if necessary.\n- `key_file` (default = `$HOME/.postgresql/postgresql.key`): An SSL key used for client authentication, if necessary.\n- `ca_file` (default = \"\"): A set of certificate authorities used to validate the database server's SSL certificate.\n\n- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). 
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\n### Example Configuration\n\n```yaml\nreceivers:\n postgresql:\n endpoint: localhost:5432\n transport: tcp\n username: otel\n password: ${env:POSTGRESQL_PASSWORD}\n databases:\n - otel\n collection_interval: 10s\n tls:\n insecure: false\n insecure_skip_verify: false\n ca_file: /home/otel/authorities.crt\n cert_file: /home/otel/mypostgrescert.crt\n key_file: /home/otel/mypostgreskey.key\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md). \n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"databases":{"items":{"type":"string"},"title":"databases","type":"array"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"},"transport":{"description":"Transport to use. 
Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for postgresql metrics.","properties":{"postgresql.backends":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.backends"},"postgresql.bgwriter.buffers.allocated":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.bgwriter.buffers.allocated"},"postgresql.bgwriter.buffers.writes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.bgwriter.buffers.writes"},"postgresql.bgwriter.checkpoint.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.bgwriter.checkpoint.count"},"postgresql.bgwriter.duration":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.bgwriter.duration"},"postgresql.bgwriter.maxwritten":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.bgwriter.maxwritten"},"postgresql.blocks_read":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.blocks_read"},"postgresql.commits":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.commits"},"postgresql.connection.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.connection.max"},"postgresql.database.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.database.count"},"postgresql.db_size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.db_size"},"postgresql.index.scans":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.index.scans"},"postgresql.index.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.index.size"},"postgresql.operations":{"$ref":"#/$defs/github.com.open-telemetry.opent
elemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.operations"},"postgresql.replication.data_delay":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.replication.data_delay"},"postgresql.rollbacks":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.rollbacks"},"postgresql.rows":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.rows"},"postgresql.table.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.table.count"},"postgresql.table.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.table.size"},"postgresql.table.vacuum.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.table.vacuum.count"},"postgresql.wal.age":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.wal.age"},"postgresql.wal.lag":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.MetricConfig","title":"postgresql.wal.lag"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for postgresql resource attributes.","properties":{"postgresql.database.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.ResourceAttributeConfig","title":"postgresql.database.name"},"postgresql.index.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.ResourceAttributeConfig","title":"postgresql.index.name"},"postgresql.table.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.internal.metadata.ResourceAttributeConfig","title":"postgresql.table.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusexecreceiver.Config":{"additionalProperties":false,"description":"Config definition for prometheus_exec configuration","markdownDescription":"# Deprecated prometheus_exec Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [deprecated]: metrics |\n| Distributions | [splunk] |\n\n[deprecated]: https://github.com/open-telemetry/opentelemetry-collector#deprecated\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis 
receiver has been deprecated due to security concerns around the ability to specify the execution of\nany arbitrary processes via its configuration. See [#6722](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6722) for additional details.\n\nThis receiver makes it easy for a user to collect metrics from third-party\nservices **via Prometheus exporters**. It's meant for people who want a\nplug-and-play solution to getting metrics from those third-party services\nthat sometimes simply don't natively export metrics or speak any\ninstrumentation protocols (MySQL, Apache, Nginx, JVM, etc.) while taking\nadvantage of the large [Prometheus\nexporters](https://prometheus.io/docs/instrumenting/exporters/) ecosystem.\n\nThrough the configuration file, you can indicate which binaries to run\n(usually [Prometheus\nexporters](https://prometheus.io/docs/instrumenting/exporters/), which are\ncustom binaries that expose the third-party services' metrics using the\nPrometheus protocol) and `prometheus_exec` will take care of starting the\nspecified binaries with their equivalent Prometheus receiver. This receiver\nalso supports starting binaries with flags and environment variables,\nretrying them with exponential backoff if they crash, string templating, and\nrandom port assignments.\n\n\u003e :information_source: If you do not need to spawn the binaries locally,\nplease consider using the [core Prometheus\nreceiver](../prometheusreceiver)\nor the [Simple Prometheus\nreceiver](../simpleprometheusreceiver).\n\n## Configuration\n\nFor each `prometheus_exec` defined in the configuration file, the specified\ncommand will be run. The command *should* start a binary that exposes\nPrometheus metrics and an equivalent Prometheus receiver will be instantiated\nto scrape its metrics, if configured correctly.\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\nThe following settings are required:\n\n- `exec` (no default): The string of the command to be run, with any flags\nneeded. The format should be: `directory/binary_to_run flag1 flag2`.\n\nThe following settings are optional:\n\n- `env` (no default): To use environment variables, under the `env` key\nshould be a list of key (`name`) - value (`value`) pairs. They are\ncase-sensitive. When running a command, these environment variables are added\nto the pre-existing environment variables the Collector is currently running\nwith.\n- `scrape_interval` (default = `60s`): How long the delay between scrapes\ndone by the receiver is.\n- `port` (no default): A number indicating the port the receiver should be\nscraping the binary's metrics from.\n\nTwo important notes about `port`:\n\n1. If it is omitted, we will try to randomly generate a port\nfor you, and retry until we find one that is free. Beware when using this,\nsince you also need to indicate your binary to listen on that same port with\nthe use of a flag and string templating inside the command, which is covered\nin 2.\n\n2. 
**All** instances of `{{port}}` in any string of any key for the enclosing\n`prometheus_exec` will be replaced with either the port value indicated or\nthe randomly generated one if no port value is set with the `port` key.\nString templating of `{{port}}` is supported in `exec`, `custom_name` and\n`env`.\n\nExample:\n\n```yaml\nreceivers:\n # this receiver will listen on port 9117\n prometheus_exec/apache:\n exec: ./apache_exporter\n port: 9117\n\n # this receiver will listen on port 9187 and {{port}} inside the command will become 9187\n prometheus_exec/postgresql:\n exec: ./postgres_exporter --web.listen-address=:{{port}}\n port: 9187\n\n # this receiver will listen on a random port and that port will be substituting the {{port}} inside the command\n prometheus_exec/mysql:\n exec: ./mysqld_exporter --web.listen-address=:{{port}}\n scrape_interval: 60s\n env:\n - name: DATA_SOURCE_NAME\n value: user:password@(hostname:port)/dbname\n - name: SECONDARY_PORT\n value: {{port}}\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"env":{"description":"Env is a list of env variables to pass to a specific command","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusexecreceiver.subprocessmanager.EnvConfig"},"title":"env","type":"array"},"exec":{"description":"Command is the command to be run (binary + flags, separated by commas)","title":"exec","type":"string"},"port":{"description":"Port is the port assigned to the Receiver, and to the {{port}} template variables","title":"port","type":"integer"},"scrape_interval":{"description":"Generic receiver config\nScrapeInterval is the time between each scrape completed by the Receiver","title":"scrape_interval","type":"string"},"scrape_timeout":{"description":"ScrapeTimeout is the time to wait before throttling a scrape request","title":"scrape_timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusexecreceiver.subprocessmanager.EnvConfig":{"additionalProperties":false,"description":"EnvConfig is the config definition of each key-value pair for environment variables","properties":{"name":{"description":"Name is the name of the environment variable","title":"name","type":"string"},"value":{"description":"Value is the value of the variable","title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for Prometheus receiver.","markdownDescription":"# Prometheus Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [core], [contrib], [aws], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section 
--\u003e\n\nReceives metric data in [Prometheus](https://prometheus.io/) format. See the\n[Design](DESIGN.md) for additional information on this receiver.\n\n## ⚠️ Warning\n\nNote: This component is currently a work in progress. It has several limitations,\nand you should not use it if any of the following limitations is a concern:\n\n* The Collector cannot auto-scale the scraping yet when multiple replicas of the\n collector are run. \n* When running multiple replicas of the collector with the same config, it will\n scrape the targets multiple times.\n* Users need to configure each replica with different scraping configuration\n if they want to manually shard the scraping.\n* The Prometheus receiver is a stateful component.\n\n## Unsupported features\nThe Prometheus receiver is meant to minimally be a drop-in replacement for Prometheus. However,\nthere are advanced features of Prometheus that we don't support; the receiver will explicitly return\nan error if its configuration YAML/code contains any of the following:\n\n- [x] alert_config.alertmanagers\n- [x] alert_config.relabel_configs\n- [x] remote_read\n- [x] remote_write\n- [x] rule_files\n\n\n## Getting Started\n\nThis receiver is a drop-in replacement for getting Prometheus to scrape your\nservices. It supports [the full set of Prometheus configuration in `scrape_config`][sc],\nincluding service discovery. Just like you would write in a YAML configuration\nfile before starting Prometheus, such as with:\n\n**Note**: Since the collector configuration supports env variable substitution,\n`$` characters in your prometheus configuration are interpreted as environment\nvariables. If you want to use $ characters in your prometheus configuration,\nyou must escape them using `$$`.\n\n```shell\nprometheus --config.file=prom.yaml\n```\n\n**Feature gates**:\n\n- `receiver.prometheusreceiver.UseCreatedMetric`: Start time for Summary, Histogram \n and Sum metrics can be retrieved from `_created` metrics. Currently, this behaviour\n is disabled by default. To enable it, use the following feature gate option:\n\n```shell\n\"--feature-gates=receiver.prometheusreceiver.UseCreatedMetric\"\n```\n\nYou can copy and paste that same configuration under:\n\n```yaml\nreceivers:\n prometheus:\n config:\n```\n\nFor example:\n\n```yaml\nreceivers:\n prometheus:\n config:\n scrape_configs:\n - job_name: 'otel-collector'\n scrape_interval: 5s\n static_configs:\n - targets: ['0.0.0.0:8888']\n - job_name: k8s\n kubernetes_sd_configs:\n - role: pod\n relabel_configs:\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]\n regex: \"true\"\n action: keep\n metric_relabel_configs:\n - source_labels: [__name__]\n regex: \"(request_duration_seconds.*|response_duration_seconds.*)\"\n action: keep\n```\n\n## OpenTelemetry Operator \nIn addition to the static job definitions, this receiver allows querying a list of jobs from the \nOpenTelemetry Operator's TargetAllocator or a compatible endpoint. \n\n```yaml\nreceivers:\n prometheus:\n target_allocator:\n endpoint: http://my-targetallocator-service\n interval: 30s\n collector_id: collector-1\n```\n## Exemplars\nThis receiver accepts exemplars coming in Prometheus format and converts them to OTLP format.\n1. Value is expected to be received in `float64` format\n2. Timestamp is expected to be received in `ms`\n3. Labels with key `span_id` in prometheus exemplars are set as OTLP `span id` and labels with key `trace_id` are set as `trace id`\n4. 
The rest of the labels are copied as-is to OTLP format\n\n[sc]: https://github.com/prometheus/prometheus/blob/v2.28.1/docs/configuration/configuration.md#scrape_config","properties":{"buffer_count":{"title":"buffer_count","type":"integer"},"buffer_period":{"title":"buffer_period","type":"string"},"config":{"description":"ConfigPlaceholder is just an entry to make the configuration pass a check\nthat requires that all keys present in the config actually exist on the\nstructure, i.e.: it will error if an unknown key is present.","title":"config"},"start_time_metric_regex":{"title":"start_time_metric_regex","type":"string"},"target_allocator":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusreceiver.targetAllocator","title":"target_allocator"},"use_start_time_metric":{"description":"UseStartTimeMetric enables retrieving the start time of all counter metrics\nfrom the process_start_time_seconds metric. This is only correct if all counters on that endpoint\nstarted after the process start time, and the process is the only actor exporting the metric after\nthe process started. It should not be used in \"exporters\" which export counters that may have\nstarted before the process itself. Use only if you know what you are doing, as this may result\nin incorrect rate calculations.","title":"use_start_time_metric","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusreceiver.targetAllocator":{"additionalProperties":false,"properties":{"collector_id":{"title":"collector_id","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"http_sd_config":{"title":"http_sd_config"},"interval":{"title":"interval","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Athenz":{"additionalProperties":false,"properties":{"key_id":{"title":"key_id","type":"string"},"principal_header":{"title":"principal_header","type":"string"},"private_key":{"title":"private_key","type":"string"},"provider_domain":{"title":"provider_domain","type":"string"},"tenant_domain":{"title":"tenant_domain","type":"string"},"tenant_service":{"title":"tenant_service","type":"string"},"zts_url":{"title":"zts_url","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Authentication":{"additionalProperties":false,"properties":{"athenz":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Athenz","title":"athenz"},"oauth2":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.OAuth2","title":"oauth2"},"tls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.TLS","title":"tls"},"token":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Token","title":"token"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Pulsar Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics, traces, logs |\n| Distributions | [contrib] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n\u003c!-- end autogenerated 
section --\u003e\n\nThe Pulsar receiver receives logs, metrics, and traces from Pulsar.\n\n## Getting Started\n\nThe following settings can be optionally configured:\n- `endpoint` (default = pulsar://localhost:6650): The URL of the Pulsar cluster.\n- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the pulsar topic to consume from.\n- `encoding` (default = otlp_proto): The encoding of the payload read from Pulsar. Available encodings:\n - `otlp_proto`: the payload is deserialized to `ExportTraceServiceRequest`.\n - `jaeger_proto`: the payload is deserialized to a single Jaeger proto `Span`.\n - `jaeger_json`: the payload is deserialized to a single Jaeger JSON Span using `jsonpb`.\n - `zipkin_proto`: the payload is deserialized into a list of Zipkin proto spans.\n - `zipkin_json`: the payload is deserialized into a list of Zipkin V2 JSON spans.\n - `zipkin_thrift`: the payload is deserialized into a list of Zipkin Thrift spans.\n- `consumer_name`: specifies the consumer name.\n- `auth`\n - `tls`\n - `cert_file`:\n - `key_file`:\n - `token`\n - `token`\n - `oauth2`\n - `issuer_url`:\n - `client_id`:\n - `audience`: \n - `athenz`\n - `provider_domain`:\n - `tenant_domain`:\n - `tenant_service`:\n - `private_key`:\n - `key_id`:\n - `principal_header`:\n - `zts_url`:\n- `subscription` (default = otlp_subscription): the subscription name of the consumer.\n- `tls_trust_certs_file_path`: path to the CA cert. For a client this verifies the server certificate. Should\n only be used if `insecure` is set to true.\n- `tls_allow_insecure_connection`: configure whether the Pulsar client accepts untrusted TLS certificates from the broker (default: false)\n\n\nExample configuration:\n```yaml\nreceivers:\n pulsar:\n endpoint: pulsar://localhost:6650\n topic: otlp-spans\n subscription: otlp_spans_sub\n consumer_name: otlp_spans_sub_1\n encoding: otlp_proto\n auth:\n tls:\n cert_file: cert.pem\n key_file: key.pem\n tls_allow_insecure_connection: false\n tls_trust_certs_file_path: ca.pem\n```","properties":{"auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Authentication","title":"auth"},"consumer_name":{"description":"Name specifies the consumer name.","title":"consumer_name","type":"string"},"encoding":{"description":"Encoding of the messages (default \"otlp_proto\")","title":"encoding","type":"string"},"endpoint":{"description":"Configure the service URL for the Pulsar service.","title":"endpoint","type":"string"},"subscription":{"description":"The subscription that the receiver will be consuming messages from (default \"otlp_subscription\")","title":"subscription","type":"string"},"tls_allow_insecure_connection":{"description":"Configure whether the Pulsar client accepts untrusted TLS certificates from the broker (default: false)","title":"tls_allow_insecure_connection","type":"boolean"},"tls_trust_certs_file_path":{"description":"Set the path to the trusted TLS certificate file","title":"tls_trust_certs_file_path","type":"string"},"topic":{"description":"The Pulsar topic from which to consume logs, metrics, and traces. 
(default = \"otlp_traces\" for traces,\n\"otlp_metrics\" for metrics, \"otlp_logs\" for logs)","title":"topic","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.OAuth2":{"additionalProperties":false,"properties":{"audience":{"title":"audience","type":"string"},"client_id":{"title":"client_id","type":"string"},"issuer_url":{"title":"issuer_url","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.TLS":{"additionalProperties":false,"properties":{"cert_file":{"title":"cert_file","type":"string"},"key_file":{"title":"key_file","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Token":{"additionalProperties":false,"properties":{"token":{"title":"token","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.Config":{"additionalProperties":false,"description":"Config relating to Array Metric Scraper.","markdownDescription":"# Pure Storage FlashArray Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Pure Storage FlashArray receiver, receives metrics from Pure Storage internal services hosts.\n\n## Configuration\n\nThe following settings are required:\n - `endpoint` (default: `http://172.0.0.0:9490/metrics/array`): The URL of the scraper selected endpoint\n\nExample:\n\n```yaml\nextensions:\n bearertokenauth/array01:\n token: \"...\"\n\nreceivers:\n purefa:\n endpoint: http://172.0.0.1:9490/metrics\n array:\n - address: array01\n auth:\n authenticator: bearertokenauth/array01\n hosts:\n - address: array01\n auth:\n authenticator: bearertokenauth/array01\n directories:\n - address: array01\n auth:\n authenticator: bearertokenauth/array01\n pods:\n - address: array01\n auth:\n authenticator: bearertokenauth/array01\n volumes:\n - address: array01\n auth:\n authenticator: bearertokenauth/array01\n env: dev\n settings:\n reload_intervals:\n array: 10s\n hosts: 13s\n directories: 15s\n pods: 30s\n volumes: 25s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"array":{"description":"Array represents the list of arrays to query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.internal.ScraperConfig"},"title":"array","type":"array"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"directories":{"description":"Directories represents the list of directories to 
query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.internal.ScraperConfig"},"title":"directories","type":"array"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"env":{"description":"Env represents the respective environment value valid to scrape","title":"env","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"hosts":{"description":"Hosts represents the list of hosts to query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.internal.ScraperConfig"},"title":"hosts","type":"array"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"pods":{"description":"Pods represents the list of pods to query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.internal.ScraperConfig"},"title":"pods","type":"array"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"settings":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.Settings","description":"Settings contains settings for the individual scrapers","title":"settings"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"volumes":{"description":"Volumes represents the list of volumes to query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.internal.ScraperConfig"},"title":"volumes","type":"array"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.ReloadIntervals":{"additionalProperties":false,"properties":{"array":{"title":"array","type":"string"},"directories":{"title":"directories","type":"string"},"hosts":{"title":"hosts","type":"string"},"pods":{"title":"pods","type":"string"},"volumes":{"title":"volumes","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.Settings":{"additionalProperties":false,"properties":{"reload_intervals":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.ReloadIntervals","title":"reload_intervals"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.internal.ScraperConfig":{"additionalProperties":false,"properties":{"address":{"title":"address","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","title":"auth"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.Config":{"additionalProperties":false,"description":"Config relating to Array Metric Scraper.","markdownDescription":"# Pure Storage FlashBlade Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Pure Storage FlashBlade receiver receives metrics from Pure Storage FlashBlade via the [Pure Storage FlashBlade OpenMetrics Exporter](https://github.com/PureStorage-OpenConnect/pure-fb-openmetrics-exporter).\n\n## Configuration\n\nThe following settings are required:\n - `endpoint` (default: `http://172.31.60.207:9491/metrics/array`): The URL of the scraper selected endpoint\n\n### Important \n\n- Only endpoints explicitly added to the configuration will be scraped. 
e.g: `clients`\n\nExample:\n\n```yaml\nextensions:\n bearertokenauth/fb01:\n token: \"...\"\n\nreceivers:\n purefb:\n endpoint: http://172.31.60.207:9491/metrics\n arrays:\n - address: fb01\n auth:\n authenticator: bearertokenauth/fb01\n clients:\n - address: fb01\n auth:\n authenticator: bearertokenauth/fb01\n env: dev\n settings:\n reload_intervals:\n array: 5m\n clients: 6m\n usage: 6m\n\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"arrays":{"description":"Arrays represents the list of arrays to query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.internal.ScraperConfig"},"title":"arrays","type":"array"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"clients":{"description":"Clients represents the list of clients metrics","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.internal.ScraperConfig"},"title":"clients","type":"array"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"env":{"description":"Env represents the respective environment value valid to scrape","title":"env","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"settings":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.Settings","description":"Settings contains settings for the individual scrapers","title":"settings"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"usage":{"description":"Usage represents the list of usage to query","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.internal.ScraperConfig"},"title":"usage","type":"array"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.ReloadIntervals":{"additionalProperties":false,"properties":{"array":{"title":"array","type":"string"},"clients":{"title":"clients","type":"string"},"usage":{"title":"usage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.Settings":{"additionalProperties":false,"properties":{"reload_intervals":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.ReloadIntervals","title":"reload_intervals"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.internal.ScraperConfig":{"additionalProperties":false,"properties":{"address":{"title":"address","type":"string"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","title":"auth"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# RabbitMQ Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches stats from a RabbitMQ node using the [RabbitMQ Management Plugin](https://www.rabbitmq.com/management.html).\n\n\u003e :construction: This receiver is in **BETA**. 
Configuration fields and metric data model are subject to change.\n## Prerequisites\n\nThis receiver supports RabbitMQ versions `3.8` and `3.9`.\n\nThe RabbitMQ Management Plugin must be enabled by following the [official instructions](https://www.rabbitmq.com/management.html#getting-started).\n\nAlso, a user with at least [monitoring](https://www.rabbitmq.com/management.html#permissions) level permissions must be used for monitoring.\n\n## Configuration\n\nThe following settings are required:\n- `username`\n- `password`\n\nThe following settings are optional:\n\n- `endpoint` (default: `http://localhost:15672`): The URL of the node to be monitored.\n- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. By default insecure settings are rejected and certificate verification is on.\n\n### Example Configuration\n\n```yaml\nreceivers:\n rabbitmq:\n endpoint: http://localhost:15672\n username: otelu\n password: ${env:RABBITMQ_PASSWORD}\n collection_interval: 10s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value 
provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"username":{"title":"username","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for rabbitmq metrics.","properties":{"rabbitmq.consumer.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig","title":"rabbitmq.consumer.count"},"rabbitmq.message.acknowledged":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig","title":"rabbitmq.message.acknowledged"},"rabbitmq.message.current":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig","title":"rabbitmq.message.current"},"rabbitmq.message.delivered":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig","title":"rabbitmq.message.delivered"},"rabbitmq.message.dropped":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig","title":"rabbitmq.message.dropped"},"rabbitmq.message.published":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.MetricConfig","title":"rabbitmq.message.published"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource 
attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for rabbitmq resource attributes.","properties":{"rabbitmq.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.ResourceAttributeConfig","title":"rabbitmq.node.name"},"rabbitmq.queue.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.ResourceAttributeConfig","title":"rabbitmq.queue.name"},"rabbitmq.vhost.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.internal.metadata.ResourceAttributeConfig","title":"rabbitmq.vhost.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.receivercreator.Config":{"additionalProperties":false,"description":"Config defines configuration for receiver_creator.","markdownDescription":"# Receiver Creator\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs, traces |\n| | [beta]: metrics |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver can instantiate other receivers at runtime based on whether\nobserved endpoints match a configured rule. To use the receiver creator, you\nmust first configure one or more\n[observers](../../extension/observer/README.md) that will discover networked\nendpoints that you may be interested in. The configured rules will be\nevaluated for each endpoint discovered. If the rule evaluates to true then\nthe receiver for that rule will be started against the matched endpoint.\n\nIf you use the receiver creator in multiple pipelines of differing telemetry types,\nbut a given dynamically instantiated receiver doesn't support one of those telemetry types,\nit will effectively lead to a logged no-op that won't cause a collector service failure.\n\n## Configuration\n\n**watch_observers**\n\nA list of observers previously defined to be run in the `extensions` section.\nreceiver_creator will watch for endpoints generated by these observers.\n\n**receivers**\n\nA map of receiver names (e.g. `redis/1`) to a template for when and how to\ninstantiate that receiver.\n\n**receivers.\u0026lt;receiver_type/id\u0026gt;.rule**\n\nRule expression using [expr\nsyntax](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md).\nVariables available are detailed below in [Rule\nExpressions](#rule-expressions).\n\n**receivers.\u0026lt;receiver_type/id\u0026gt;.config**\n\nThis is configuration that will be used when creating the receiver at\nruntime.\n\nThis option can use static and dynamic configuration values. Static values\nare normal YAML values. However, the value can also be dynamically constructed\nfrom the discovered endpoint object. 
Dynamic values are surrounded by\nbackticks (\\`). If a literal backtick is needed use \\\\` to escape it. Dynamic\nvalues can be used with static values in which case they are concatenated.\nFor example:\n\n```yaml\nconfig:\n secure_url: https://`pod.labels[\"secure_host\"]`\n```\n\nThe value of `secure_url` will be `https://` concatenated with the value of\nthe `secure_host` label.\n\nThis can also be used when the discovered endpoint needs to be changed\ndynamically. For instance, suppose the IP `1.2.3.4` is discovered without a\nport but the port needs to be set inside endpoint. You could do:\n\n```yaml\nconfig:\n endpoint: '`endpoint`:8080'\n```\n\nIf your target receiver provides an `endpoint` config field and you aren't\nmanually setting it like the above example, the observer endpoint target value\nwill automatically be sourced. If no `endpoint` field is available you are\nrequired to specify any necessary fields.\n\n**receivers.resource_attributes**\n\n```yaml\nresource_attributes:\n \u003cendpoint type\u003e:\n \u003cattribute\u003e: \u003cattribute value\u003e\n```\n\nThis setting controls what resource attributes are set on telemetry emitted from the created receiver. These attributes can be set from [values in the endpoint](#rule-expressions) that was matched by the `rule`. These attributes vary based on the endpoint type. These defaults can be disabled by setting the attribute to be removed to an empty value. Note that the values can be dynamic and processed the same as in `config`.\n\nNote that the backticks below are not typos--they indicate the value is set dynamically.\n\n`type == \"pod\"`\n\n| Resource Attribute | Default |\n|--------------------|---------------|\n| k8s.pod.name | \\`name\\` |\n| k8s.pod.uid | \\`uid\\` |\n| k8s.namespace.name | \\`namespace\\` |\n\n`type == \"port\"`\n\n| Resource Attribute | Default |\n|--------------------|-------------------|\n| k8s.pod.name | \\`pod.name\\` |\n| k8s.pod.uid | \\`pod.uid\\` |\n| k8s.namespace.name | \\`pod.namespace\\` |\n\n`type == \"container\"`\n\n| Resource Attribute | Default |\n|----------------------|-------------------|\n| container.name | \\`name\\` |\n| container.image.name | \\`image\\` |\n\n`type == \"hostport\"`\n\nNone\n\n`type == \"k8s.node\"`\n\n| Resource Attribute | Default |\n|--------------------|-------------------|\n| k8s.node.name | \\`name\\` |\n| k8s.node.uid | \\`uid\\` |\n\nSee `redis/2` in [examples](#examples).\n\n\n**receivers.\u0026lt;receiver_type/id\u0026gt;.resource_attributes**\n\n```yaml\nreceivers:\n \u003creceiver_type\u003e:\n resource_attributes:\n \u003cattribute\u003e: \u003cattribute string value\u003e\n```\n\nSimilar to the per-endpoint type `resource_attributes` described above but for individual receiver instances. Duplicate attribute entries (including the empty string) in this receiver-specific mapping take precedence. These attribute values also support expansion from endpoint environment content. At this time their values must be strings.\n\n## Rule Expressions\n\nEach rule must start with `type == (\"pod\"|\"port\"|\"hostport\"|\"container\"|\"k8s.node\") \u0026\u0026` such that the rule matches\nonly one endpoint type. 
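For example, a rule that only starts a Redis receiver for endpoints exposing port 6379 (the same rule used by `redis/1` in the [examples](#examples) below) could look like the following sketch:\n\n```yaml\nreceivers:\n  redis/1:\n    rule: type == \"port\" \u0026\u0026 port == 6379\n```\n\n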
Depending on the type of endpoint the rule is\ntargeting, it will have different variables available.\n\n### Pod\n\n| Variable | Description |\n|-------------|-----------------------------------|\n| type | `\"pod\"` |\n| id | ID of source endpoint |\n| name | name of the pod |\n| namespace | namespace of the pod |\n| uid | unique id of the pod |\n| labels | map of labels set on the pod |\n| annotations | map of annotations set on the pod |\n\n### Port\n\n| Variable | Description |\n|-----------------|-----------------------------------------|\n| type | `\"port\"` |\n| id | ID of source endpoint |\n| name | container port name |\n| port | port number |\n| protocol | The transport protocol (\"TCP\" or \"UDP\") |\n| pod.name | name of the owning pod |\n| pod.namespace | namespace of the pod |\n| pod.uid | unique id of the pod |\n| pod.labels | map of labels of the owning pod |\n| pod.annotations | map of annotations of the owning pod |\n\n### Host Port\n\n| Variable | Description |\n|---------------|--------------------------------------------------|\n| type | `\"hostport\"` |\n| id | ID of source endpoint |\n| process_name | Name of the process |\n| command | Command line used to invoke the process |\n| is_ipv6 | true if endpoint is IPv6, otherwise false |\n| port | Port number |\n| transport | The transport protocol (\"TCP\" or \"UDP\") |\n\n### Container\n\n| Variable | Description |\n|----------------|-------------------------------------------------------------------|\n| type | `\"container\"` |\n| id | ID of source endpoint |\n| name | Primary name of the container |\n| image | Name of the container image |\n| port | Exposed port of the container |\n| alternate_port | Exposed port accessed through redirection, such as a mapped port |\n| command | The command used to invoke the process of the container |\n| container_id | ID of the container |\n| host | Hostname or IP of the underlying host the container is running on |\n| transport | Transport protocol used by the endpoint (TCP or UDP) |\n| labels | User-specified metadata labels on the container |\n\n### Kubernetes Node\n\n| Variable | Description |\n|----------------|-------------------------------------------------------------------|\n| type | `\"k8s.node\"` |\n| id | ID of source endpoint |\n| name | The name of the Kubernetes node |\n| uid | The unique ID for the node |\n| hostname | The node's hostname as reported by its Status object |\n| external_ip | The node's external IP address as reported by its Status object |\n| internal_ip | The node's internal IP address as reported by its Status object |\n| external_dns | The node's external DNS record as reported by its Status object |\n| internal_dns | The node's internal DNS record as reported by its Status object |\n| annotations | A key-value map of non-identifying, user-specified node metadata |\n| labels | A key-value map of user-specified node metadata |\n| kubelet_endpoint_port | The node Status object's DaemonEndpoints.KubeletEndpoint.Port value |\n\n## Examples\n\n```yaml\nextensions:\n # Configures the Kubernetes observer to watch for pod start and stop events.\n k8s_observer:\n host_observer:\n\nreceivers:\n receiver_creator/1:\n # Name of the extensions to watch for endpoints to start and stop.\n watch_observers: [k8s_observer]\n receivers:\n prometheus_simple:\n # Configure prometheus scraping if standard prometheus annotations are set on the pod.\n rule: type == \"pod\" \u0026\u0026 annotations[\"prometheus.io/scrape\"] == \"true\"\n config:\n metrics_path: 
'`\"prometheus.io/path\" in annotations ? annotations[\"prometheus.io/path\"] : \"/metrics\"`'\n endpoint: '`endpoint`:`\"prometheus.io/port\" in annotations ? annotations[\"prometheus.io/port\"] : 9090`'\n resource_attributes:\n an.attribute: a.value\n # Dynamic configuration values\n app.version: '`labels[\"app_version\"]`'\n\n redis/1:\n # If this rule matches an instance of this receiver will be started.\n rule: type == \"port\" \u0026\u0026 port == 6379\n config:\n # Static receiver-specific config.\n password: secret\n # Dynamic configuration value.\n collection_interval: '`pod.annotations[\"collection_interval\"]`'\n\n redis/2:\n # Set a resource attribute based on endpoint value.\n rule: type == \"port\" \u0026\u0026 port == 6379\n\n resource_attributes:\n # Dynamic configuration values, overwriting default attributes`\n pod:\n service.name: '`labels[\"service_name\"]`'\n app: '`labels[\"app\"]`'\n port:\n service.name: '`pod.labels[\"service_name\"]`'\n app: '`pod.labels[\"app\"]`'\n receiver_creator/2:\n # Name of the extensions to watch for endpoints to start and stop.\n watch_observers: [host_observer]\n receivers:\n redis/on_host:\n # If this rule matches an instance of this receiver will be started.\n rule: type == \"port\" \u0026\u0026 port == 6379 \u0026\u0026 is_ipv6 == true\n resource_attributes:\n service.name: redis_on_host\n receiver_creator/3:\n watch_observers: [k8s_observer]\n receivers:\n kubeletstats:\n rule: type == \"k8s.node\"\n config:\n auth_type: serviceAccount\n collection_interval: 10s\n endpoint: '`endpoint`:`kubelet_endpoint_port`'\n extra_metadata_labels:\n - container.id\n metric_groups:\n - container\n - pod\n - node\n\nprocessors:\n exampleprocessor:\n\nexporters:\n exampleexporter:\n\nservice:\n pipelines:\n metrics:\n receivers: [receiver_creator/1, receiver_creator/2, receiver_creator/3]\n processors: [exampleprocessor]\n exporters: [exampleexporter]\n extensions: [k8s_observer, host_observer]\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.receivercreator.resourceAttributes","description":"ResourceAttributes is a map of default resource attributes to add to each resource\nobject received by this receiver from dynamically created receivers.","title":"resource_attributes"},"watch_observers":{"description":"WatchObservers are the extensions to listen to endpoints from.","items":{"type":"string"},"title":"watch_observers","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.receivercreator.resourceAttributes":{"patternProperties":{".*":{"patternProperties":{".*":{"type":"string"}},"type":"object"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Redis Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: 
https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Redis receiver is designed to retrieve Redis INFO data from a single Redis\ninstance, build metrics from that data, and send them to the next consumer at a\nconfigurable interval.\n\n## Details\n\nThe Redis INFO command returns information and statistics about a Redis\nserver (see [https://redis.io/commands/info](https://redis.io/commands/info) for\ndetails). The Redis receiver extracts values from the result and converts them to OpenTelemetry\nmetrics. Details about the metrics produced by the Redis receiver\ncan be found by browsing [metric_functions.go](metric_functions.go).\n\nFor example, one of the fields returned by the Redis INFO command is\n`used_cpu_sys` which indicates the system CPU consumed by the Redis server,\nexpressed in seconds, since the start of the Redis instance.\n\nThe Redis receiver turns this data into a gauge...\n\n```go\nfunc usedCPUSys() *redisMetric {\n\treturn \u0026redisMetric{\n\t\tkey: \"used_cpu_sys\",\n\t\tname: \"redis.cpu.time\",\n\t\tunits: \"s\",\n\t\tmdType: metricspb.MetricDescriptor_GAUGE_DOUBLE,\n\t\tlabels: map[string]string{\"state\": \"sys\"},\n\t}\n}\n```\n\nwith a metric name of `redis.cpu.time` and a units value of `s` (seconds).\n\n## Configuration\n\n\u003e :information_source: This receiver is in beta and configuration fields are subject to change.\n\nThe following settings are required:\n\n- `endpoint` (no default): The hostname and port of the Redis instance,\nseparated by a colon.\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): This receiver runs on an interval.\nEach time it runs, it queries Redis, creates metrics, and sends them to the\nnext consumer. The `collection_interval` configuration option tells this\nreceiver the duration between runs. This value must be a string readable by\nGolang's `ParseDuration` function (example: `1h30m`). Valid time units are\n`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `password` (no default): The password used to access the Redis instance;\nmust match the password specified in the `requirepass` server configuration\noption.\n- `transport` (default = `tcp`): Defines the network to use for connecting to the server. Valid values are `tcp` or `unix`.\n- `tls`:\n - `insecure` (default = true): whether to disable client transport security for the exporter's connection.\n - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should only be used if `insecure` is set to false.\n - `cert_file`: path to the TLS cert to use for TLS required connections. Should only be used if `insecure` is set to false.\n - `key_file`: path to the TLS key to use for TLS required connections. Should only be used if `insecure` is set to false.\n\nExample:\n\n```yaml\nreceivers:\n redis:\n endpoint: \"localhost:6379\"\n collection_interval: 10s\n password: ${env:REDIS_PASSWORD}\n```\n\n\u003e :information_source: As with all OpenTelemetry configuration values, a\nreference to an environment variable is supported. 
For example, to pick up\nthe value of an environment variable `REDIS_PASSWORD`, you could use a\nconfiguration like the following:\n\n```yaml\nreceivers:\n redis:\n endpoint: \"localhost:6379\"\n collection_interval: 10s\n password: ${env:REDIS_PASSWORD}\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"description":"Optional password. Must match the password specified in the\nrequirepass server configuration option.","title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"},"transport":{"description":"Transport to use. 
Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for redis metrics.","properties":{"redis.clients.blocked":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.clients.blocked"},"redis.clients.connected":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.clients.connected"},"redis.clients.max_input_buffer":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.clients.max_input_buffer"},"redis.clients.max_output_buffer":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.clients.max_output_buffer"},"redis.cmd.calls":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.cmd.calls"},"redis.cmd.usec":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.cmd.usec"},"redis.commands":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.commands"},"redis.commands.processed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.commands.processed"},"redis.connections.received":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.connections.received"},"redis.connections.rejected":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.connections.rejected"},"redis.cpu.time":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.cpu.time"},"redis.db.avg_ttl":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.db.avg_ttl"},"redis.db.expires":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.db.expires"},"redis.db.keys":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.db.keys"},"redis.keys.evicted":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":
"redis.keys.evicted"},"redis.keys.expired":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.keys.expired"},"redis.keyspace.hits":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.keyspace.hits"},"redis.keyspace.misses":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.keyspace.misses"},"redis.latest_fork":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.latest_fork"},"redis.maxmemory":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.maxmemory"},"redis.memory.fragmentation_ratio":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.memory.fragmentation_ratio"},"redis.memory.lua":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.memory.lua"},"redis.memory.peak":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.memory.peak"},"redis.memory.rss":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.memory.rss"},"redis.memory.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.memory.used"},"redis.net.input":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.net.input"},"redis.net.output":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.net.output"},"redis.rdb.changes_since_last_save":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.rdb.changes_since_last_save"},"redis.replication.backlog_first_byte_offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.replication.backlog_first_byte_offset"},"redis.replication.offset":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.replication.offset"},"redis.role":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.role"},"redis.slaves.connected":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.slaves.connected"},"redis.uptime":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.MetricConfig","title":"redis.uptime"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceA
ttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for redis resource attributes.","properties":{"redis.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.internal.metadata.ResourceAttributeConfig","title":"redis.version"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver agent.","markdownDescription":"# Riak Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\n\u003c!-- markdown-link-check-disable --\u003e\u003c!-- Failing due to \"unable to verify the first certificate\" --\u003e\nRiak metrics will be collected from the [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status) endpoint.\n\nThis Riak receiver will collect metrics for [3.x+](https://github.com/basho/riak/releases)\n\n## Configuration\n\nThe following configuration settings are required:\n\n- `username`\n- `password`\n\nThe following configuration settings are optional:\n\n- `endpoint` (default: `http://localhost:8098`): The URL of the node to be monitored.\n- `collection_interval` (default = `60s`): This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n- `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. 
By default insecure settings are rejected and certificate verification is on.\n\n### Example Configuration\n\n```yaml\nreceivers:\n riak:\n endpoint: http://localhost:8098\n username: otelu\n password: ${env:RIAK_PASSWORD}\n collection_interval: 60s\n```\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricsBuilderConfig","title":"metrics"},"password":{"title":"password","type":"string"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"username":{"title":"username","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricsBuilderConfig":{"additionalProperties":false,"description":"MetricsBuilderConfig is a configuration for riak metrics builder.","properties":{"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricsConfig","title":"metrics"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for riak metrics.","properties":{"riak.memory.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig","title":"riak.memory.limit"},"riak.node.operation.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig","title":"riak.node.operation.count"},"riak.node.operation.time.mean":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig","title":"riak.node.operation.time.mean"},"riak.node.read_repair.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig","title":"riak.node.read_repair.count"},"riak.vnode.index.operation.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig","title":"riak.vnode.index.operation.count"},"riak.vnode.operation.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.MetricConfig","title":"riak.vnode.operation.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for riak resource attributes.","properties":{"riak.node.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.internal.metadata.ResourceAttributeConfig","title":"riak.node.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sapmreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for SAPM receiver.","markdownDescription":"# SAPM Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability 
| [beta]: traces |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe SAPM receiver builds on the Jaeger proto. This allows the collector to\nreceive traces from other collectors or the SignalFx Smart Agent. SAPM proto\nand some useful related utilities can be found\n[here](https://github.com/signalfx/sapm-proto/).\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `0.0.0.0:7276`): Address and port that the SAPM\n receiver should bind to.\n\nThe following settings are optional:\n\n- `access_token_passthrough`: (default = `false`) Whether to preserve incoming\n access token (`X-Sf-Token` header value) as `\"com.splunk.signalfx.access_token\"`\n trace resource attribute. Can be used in tandem with identical configuration option\n for [SAPM exporter](../../exporter/sapmexporter/README.md) to preserve trace origin.\n- `tls_settings` (no default): This is an optional object used to specify if TLS should\n be used for incoming connections.\n - `cert_file`: Specifies the certificate file to use for TLS connection.\n Note: Both `key_file` and `cert_file` are required for TLS connection.\n - `key_file`: Specifies the key file to use for TLS connection. Note: Both\n `key_file` and `cert_file` are required for TLS connection.\n\nExample:\n\n```yaml\nreceivers:\n sapm:\n endpoint: localhost:7276\n access_token_passthrough: true\n tls:\n cert_file: /test.crt\n key_file: /test.key\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"access_token_passthrough":{"description":"AccessTokenPassthrough indicates whether to associate datapoints with an organization access token received in request.","title":"access_token_passthrough","type":"boolean"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client 
configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.signalfxreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the SignalFx receiver.","markdownDescription":"# SignalFx Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics, logs |\n| Distributions | [contrib], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe SignalFx receiver accepts:\n\n- Metrics in the [SignalFx proto\nformat](https://github.com/signalfx/com_signalfx_metrics_protobuf).\n- Events (Logs) in the [SignalFx proto\nformat](https://github.com/signalfx/com_signalfx_metrics_protobuf/blob/master/proto/signalfx_metrics.proto#L137).\nMore information about sending custom events can be found in the [SignalFx\nDevelopers\nGuide](https://developers.signalfx.com/ingest_data_reference.html#tag/Send-Custom-Events).\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `0.0.0.0:9943`): Address and port that the SignalFx\n receiver should bind to.\n\nThe following settings are optional:\n\n- `access_token_passthrough`: (default = `false`) Whether to preserve incoming\n access token (`X-Sf-Token` header value) as\n `\"com.splunk.signalfx.access_token\"` metric resource attribute. Should only be\n used in tandem with identical configuration option for [SignalFx\n exporter](../../exporter/signalfxexporter/README.md) to preserve datapoint\n origin. Usage of any other exporter in a metric pipeline with this configuration\n option enabled will reveal all organization access tokens contained in this attribute.\n- `tls_settings` (no default): This is an optional object used to specify if\n TLS should be used for incoming connections. 
Both `key_file` and `cert_file`\n are required to support incoming TLS connections.\n - `cert_file`: Specifies the certificate file to use for TLS connection.\n - `key_file`: Specifies the key file to use for TLS connection.\n\nExample:\n\n```yaml\nreceivers:\n signalfx:\n signalfx/advanced:\n access_token_passthrough: true\n tls:\n cert_file: /test.crt\n key_file: /test.key\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n\u003e :warning: When enabling the SignalFx receiver or exporter, configure both the `metrics` and `logs` pipelines.\n\n```yaml\nservice:\n pipelines:\n metrics:\n receivers: [signalfx]\n processors: [memory_limiter, batch]\n exporters: [signalfx]\n logs:\n receivers: [signalfx]\n processors: [memory_limiter, batch]\n exporters: [signalfx]\n```","properties":{"access_token_passthrough":{"description":"AccessTokenPassthrough indicates whether to associate datapoints with an organization access token received in request.","title":"access_token_passthrough","type":"boolean"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.simpleprometheusreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for simple prometheus receiver.","markdownDescription":"# Simple Prometheus Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe `prometheus_simple` receiver is a wrapper around the [prometheus\nreceiver](../prometheusreceiver).\nThis receiver provides a simple configuration interface to configure the\nprometheus receiver to scrape metrics from a single 
target.\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `localhost:9090`): The endpoint from which prometheus\nmetrics should be scraped.\n\nThe following settings are optional:\n\n- `collection_interval` (default = `10s`): The interval at which metrics should\nbe emitted by this receiver.\n- `metrics_path` (default = `/metrics`): The path to the metrics endpoint.\n- `params` (default = `{}`): The query parameters to pass to the metrics endpoint. If specified, params are appended to `metrics_path` to form the URL with which the target is scraped.\n- `use_service_account` (default = `false`): Whether or not to use the\nKubernetes Pod service account for authentication.\n- `tls_enabled` (default = `false`): Whether or not to use TLS. Only if\n`tls_enabled` is set to `true`, the values under `tls_config` are accounted\nfor. This setting will be deprecated. Please use `tls` instead.\n\nThe `tls_config` section supports the following options. This setting will be deprecated. Please use `tls` instead:\n\n- `ca_file` (no default): Path to the CA cert that has signed the TLS\ncertificate.\n- `cert_file` (no default): Path to the client TLS certificate to use for TLS\nrequired connections.\n- `key_file` (no default): Path to the client TLS key to use for TLS required\nconnections.\n- `insecure_skip_verify` (default = `false`): Whether or not to skip\ncertificate verification.\n\n- `tls`: see [TLS Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md#tls-configuration-settings) for the full set of available options.\n\nExample:\n\n```yaml\n receivers:\n prometheus_simple:\n collection_interval: 10s\n use_service_account: true\n endpoint: \"172.17.0.5:9153\"\n tls:\n ca_file: \"/path/to/ca\"\n cert_file: \"/path/to/cert\"\n key_file: \"/path/to/key\"\n insecure_skip_verify: true\n exporters:\n signalfx:\n access_token: \u003cSIGNALFX_ACCESS_TOKEN\u003e\n url: \u003cSIGNALFX_INGEST_URL\u003e\n\n service:\n pipelines:\n metrics:\n receivers: [prometheus_simple]\n exporters: [signalfx]\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"collection_interval":{"description":"CollectionInterval is the interval at which metrics should be collected","title":"collection_interval","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value 
provided","title":"idle_conn_timeout","type":"string"},"labels":{"description":"Labels static labels","patternProperties":{".*":{"type":"string"}},"title":"labels","type":"object"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"metrics_path":{"description":"MetricsPath the path to the metrics endpoint.","title":"metrics_path","type":"string"},"params":{"$ref":"#/$defs/net.url.Values","description":"Params the parameters to the metrics endpoint.","title":"params"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"tls_config":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.simpleprometheusreceiver.tlsConfig","title":"tls_config"},"tls_enabled":{"title":"tls_enabled","type":"boolean"},"use_service_account":{"description":"Whether or not to use pod service account to authenticate.","title":"use_service_account","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. 
See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.simpleprometheusreceiver.tlsConfig":{"additionalProperties":false,"properties":{"ca_file":{"title":"ca_file","type":"string"},"cert_file":{"title":"cert_file","type":"string"},"insecure_skip_verify":{"title":"insecure_skip_verify","type":"boolean"},"key_file":{"title":"key_file","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.skywalkingreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for skywalking receiver.","properties":{"protocols":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.skywalkingreceiver.Protocols","title":"protocols"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.skywalkingreceiver.Protocols":{"additionalProperties":false,"description":"Protocols is the configuration for the supported protocols.","markdownDescription":"# Skywalking Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nReceives trace data in [Skywalking](https://skywalking.apache.org/) format.\n\n## Getting Started\n\nBy default, the Skywalking receiver will not serve any protocol. A protocol must be\nnamed under the `protocols` object for the Skywalking receiver to start. 
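For example, a minimal sketch that enables just the gRPC protocol on its default endpoint might look like the following (the full example below configures both protocols with explicit endpoints):\n\n```yaml\nreceivers:\n  skywalking:\n    protocols:\n      grpc:\n```\n\n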
The\nbelow protocols are supported, each of which supports an optional `endpoint`\nobject configuration parameter.\n\n- `grpc` (default `endpoint` = 0.0.0.0:11800)\n- `http` (default `endpoint` = 0.0.0.0:12800)\n\nExamples:\n\n```yaml\nreceivers:\n skywalking:\n protocols:\n grpc:\n endpoint: 0.0.0.0:11800\n http:\n endpoint: 0.0.0.0:12800\n\nservice:\n pipelines:\n traces:\n receivers: [skywalking]\n```","properties":{"grpc":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCServerSettings","title":"grpc"},"http":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings","title":"http"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.Attribute":{"additionalProperties":false,"description":"Attribute is a connection between a metric configuration and an AttributeConfig","properties":{"name":{"description":"Name is required and should match the key for an AttributeConfig","title":"name","type":"string"},"value":{"description":"Value is optional and is only needed for a matched AttributeConfig with enum values.\nValue should match one of the AttributeConfig's enum values in this case","title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.AttributeConfig":{"additionalProperties":false,"description":"AttributeConfig contains config info about all of the metric attributes that will be used by this receiver.","properties":{"description":{"description":"Description is optional and describes what the attribute represents","title":"description","type":"string"},"enum":{"description":"Enum is required only if OID and IndexedValuePrefix are not defined.\nThis contains a list of possible values that can be associated with this attribute","items":{"type":"string"},"title":"enum","type":"array"},"indexed_value_prefix":{"description":"IndexedValuePrefix is required only if Enum and OID are not defined.\nThis is used alongside metrics with ColumnOIDs to assign attribute values using this prefix + the OID index of the metric value","title":"indexed_value_prefix","type":"string"},"oid":{"description":"OID is required only if Enum and IndexedValuePrefix are not defined.\nThis is the column OID which will provide indexed values to be used for this attribute (alongside a metric with ColumnOIDs)","title":"oid","type":"string"},"value":{"description":"Value is optional, and will allow for a different attribute key other than the attribute name","title":"value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.ColumnOID":{"additionalProperties":false,"description":"ColumnOID holds OID info for an indexed metric as well as any attributes or resource attributes that are attached to it","properties":{"attributes":{"description":"Attributes is required only if there are no ResourceAttributes defined here.\nValid values are non enum AttributeConfig names that will be used to differentiate the\nindexed values for the column OID","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.Attribute"},"title":"attributes","type":"array"},"oid":{"description":"OID is required and is the column OID that is associated with a metric","title":"oid","type":"string"},"resource_attributes":{"description":"ResourceAttributes is required only if there are no Attributes associated with non enum\nAttributeConfigs defined here. 
Valid values are ResourceAttributeConfig names that will\nbe used to differentiate the indexed values for the column OID","items":{"type":"string"},"title":"resource_attributes","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.Config":{"additionalProperties":false,"description":"Config defines the configuration for the various elements of the receiver.","properties":{"attributes":{"description":"Attributes defines what attributes will be used on metrics for this receiver and is composed of\nattribute names along with their attribute configurations","patternProperties":{".*":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.AttributeConfig"}},"title":"attributes","type":"object"},"auth_password":{"description":"AuthPassword is the authentication password used for this SNMP connection.\nOnly valid for version \"v3\" and if \"no_auth_no_priv\" is not selected for SecurityLevel","title":"auth_password","type":"string"},"auth_type":{"description":"AuthType is the type of authentication protocol to use for this SNMP connection.\nOnly valid for version “v3” and if “no_auth_no_priv” is not selected for SecurityLevel\nValid options: “md5”, “sha”, “sha224”, “sha256”, “sha384”, “sha512”\nDefault: \"md5\"","title":"auth_type","type":"string"},"collection_interval":{"title":"collection_interval","type":"string"},"community":{"description":"Community is the SNMP community string to use.\nOnly valid for versions \"v1\" and \"v2c\"\nDefault: public","title":"community","type":"string"},"endpoint":{"description":"Endpoint is the SNMP target to request data from. Must be formatted as [udp|tcp|][4|6|]://{host}:{port}.\nDefault: udp://localhost:161\nIf no scheme is given, udp4 is assumed.\nIf no port is given, 161 is assumed.","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"description":"Metrics defines what SNMP metrics will be collected for this receiver and is composed of metric\nnames along with their metric configurations","patternProperties":{".*":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.MetricConfig"}},"title":"metrics","type":"object"},"privacy_password":{"description":"PrivacyPassword is the authentication password used for this SNMP connection.\nOnly valid for version “v3” and if \"auth_priv\" is selected for SecurityLevel","title":"privacy_password","type":"string"},"privacy_type":{"description":"PrivacyType is the type of privacy protocol to use for this SNMP connection.\nOnly valid for version “v3” and if \"auth_priv\" is selected for SecurityLevel\nValid options: “des”, “aes”, “aes192”, “aes256”, “aes192c”, “aes256c”\nDefault: \"des\"","title":"privacy_type","type":"string"},"resource_attributes":{"description":"ResourceAttributes defines what resource attributes will be used for this receiver and is composed\nof resource attribute names along with their resource attribute configurations","patternProperties":{".*":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.ResourceAttributeConfig"}},"title":"resource_attributes","type":"object"},"security_level":{"description":"SecurityLevel is the security level to use for this SNMP connection.\nOnly valid for version “v3”\nValid options: “no_auth_no_priv”, “auth_no_priv”, “auth_priv”\nDefault: \"no_auth_no_priv\"","title":"security_level","type":"string"},"user":{"description":"User 
is the SNMP User for this connection.\nOnly valid for version “v3”","title":"user","type":"string"},"version":{"description":"Version is the version of SNMP to use for this connection.\nValid options: v1, v2c, v3.\nDefault: v2c","title":"version","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.GaugeMetric":{"additionalProperties":false,"description":"GaugeMetric contains info about the value of the gauge metric","properties":{"value_type":{"description":"ValueType is required and can be either int or double","title":"value_type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.MetricConfig":{"additionalProperties":false,"description":"MetricConfig contains config info about a given metric","properties":{"column_oids":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.ColumnOID"},"title":"column_oids","type":"array"},"description":{"description":"Description is optional and describes what this metric represents","title":"description","type":"string"},"gauge":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.GaugeMetric","description":"Either Gauge or Sum config is required","title":"gauge"},"scalar_oids":{"description":"Either ScalarOIDs or ColumnOIDs is required.\nScalarOIDs is used if one or more scalar OID values is used for this metric.\nColumnOIDs is used if one or more column OID indexed set of values is used\nfor this metric.","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.ScalarOID"},"title":"scalar_oids","type":"array"},"sum":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.SumMetric","title":"sum"},"unit":{"description":"Unit is required","title":"unit","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig contains config info about all of the resource attributes that will be used by this receiver.","properties":{"description":{"description":"Description is optional and describes what the resource attribute represents","title":"description","type":"string"},"indexed_value_prefix":{"description":"IndexedValuePrefix is required only if OID is not defined.\nThis will be used alongside indexed metric values for this resource attribute. The prefix value concatenated with\nspecific indexes of metric indexed values (Ex: prefix.1.2) will ultimately each be associated with a different \"resource\"\nas an attribute on that resource. The related indexed metric values will then be used to associate metric datapoints to\nthose resources.","title":"indexed_value_prefix","type":"string"},"oid":{"description":"OID is required only if IndexedValuePrefix is not defined.\nThis is the column OID which will provide indexed values to be used for this resource attribute. These indexed values\nwill ultimately each be associated with a different \"resource\" as an attribute on that resource. 
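The SNMP receiver definitions above describe the endpoint, metric, attribute, and OID fields but carry no example. A minimal sketch, assuming the component id `snmp` and an illustrative metric name and scalar OID (both placeholders, not taken from the schema), might look like:

```yaml
receivers:
  snmp:
    collection_interval: 60s
    endpoint: udp://localhost:161   # schema default
    version: v2c
    community: public
    metrics:
      # hypothetical metric name, for illustration only
      system.uptime:
        unit: s
        gauge:
          value_type: int
        scalar_oids:
          - oid: "1.3.6.1.2.1.1.3.0"   # illustrative scalar OID
```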
Indexed metric values\nwill then be used to associate metric datapoints to the matching \"resource\" (based on matching indexes).","title":"oid","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.ScalarOID":{"additionalProperties":false,"description":"ScalarOID holds OID info for a scalar metric as well as any attributes that are attached to it","properties":{"attributes":{"description":"Attributes is optional and may contain names and values associated with enum\nAttributeConfigs to associate with the value of the scalar OID","items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.Attribute"},"title":"attributes","type":"array"},"oid":{"description":"OID is required and is the scalar OID that is associated with a metric","title":"oid","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snmpreceiver.SumMetric":{"additionalProperties":false,"description":"SumMetric contains info about the value of the sum metric","properties":{"aggregation":{"description":"Aggregation is required and can be cumulative or delta","title":"aggregation","type":"string"},"monotonic":{"description":"Monotonic is required and can be true or false","title":"monotonic","type":"boolean"},"value_type":{"description":"ValueType is required and can be either int or double","title":"value_type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.Config":{"additionalProperties":false,"properties":{"account":{"title":"account","type":"string"},"collection_interval":{"title":"collection_interval","type":"string"},"database":{"title":"database","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"role":{"title":"role","type":"string"},"schema":{"title":"schema","type":"string"},"username":{"title":"username","type":"string"},"warehouse":{"title":"warehouse","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for snowflake 
metrics.","properties":{"snowflake.billing.cloud_service.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.billing.cloud_service.total"},"snowflake.billing.total_credit.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.billing.total_credit.total"},"snowflake.billing.virtual_warehouse.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.billing.virtual_warehouse.total"},"snowflake.billing.warehouse.cloud_service.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.billing.warehouse.cloud_service.total"},"snowflake.billing.warehouse.total_credit.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.billing.warehouse.total_credit.total"},"snowflake.billing.warehouse.virtual_warehouse.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.billing.warehouse.virtual_warehouse.total"},"snowflake.database.bytes_scanned.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.database.bytes_scanned.avg"},"snowflake.database.query.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.database.query.count"},"snowflake.logins.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.logins.total"},"snowflake.pipe.credits_used.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.pipe.credits_used.total"},"snowflake.query.blocked":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.blocked"},"snowflake.query.bytes_deleted.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.bytes_deleted.avg"},"snowflake.query.bytes_spilled.local.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.bytes_spilled.local.avg"},"snowflake.query.bytes_spilled.remote.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.bytes_spilled.remote.avg"},"snowflake.query.bytes_written.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.bytes_written.avg"},"snowflake.query.compilation_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.comp
ilation_time.avg"},"snowflake.query.data_scanned_cache.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.data_scanned_cache.avg"},"snowflake.query.executed":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.executed"},"snowflake.query.execution_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.execution_time.avg"},"snowflake.query.partitions_scanned.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.partitions_scanned.avg"},"snowflake.query.queued_overload":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.queued_overload"},"snowflake.query.queued_provision":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.query.queued_provision"},"snowflake.queued_overload_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.queued_overload_time.avg"},"snowflake.queued_provisioning_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.queued_provisioning_time.avg"},"snowflake.queued_repair_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.queued_repair_time.avg"},"snowflake.rows_deleted.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.rows_deleted.avg"},"snowflake.rows_inserted.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.rows_inserted.avg"},"snowflake.rows_produced.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.rows_produced.avg"},"snowflake.rows_unloaded.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.rows_unloaded.avg"},"snowflake.rows_updated.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.rows_updated.avg"},"snowflake.session_id.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.session_id.count"},"snowflake.storage.failsafe_bytes.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.storage.failsafe_bytes.total"},"snowflake.storage.stage_bytes.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadat
a.MetricConfig","title":"snowflake.storage.stage_bytes.total"},"snowflake.storage.storage_bytes.total":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.storage.storage_bytes.total"},"snowflake.total_elapsed_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.MetricConfig","title":"snowflake.total_elapsed_time.avg"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for snowflake resource attributes.","properties":{"snowflake.account.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.internal.metadata.ResourceAttributeConfig","title":"snowflake.account.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.Authentication":{"additionalProperties":false,"description":"Authentication defines authentication strategies.","properties":{"sasl_external":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.SaslExternalConfig","title":"sasl_external"},"sasl_plain":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.SaslPlainTextConfig","title":"sasl_plain"},"sasl_xauth2":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.SaslXAuth2Config","title":"sasl_xauth2"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for Solace receiver.","markdownDescription":"# Solace Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [contrib], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Solace receiver receives trace data from a [Solace PubSub+ Event Broker](https://solace.com/products/event-broker/).\n\n## Getting Started\nTo get started with the Solace receiver, a telemetry queue and authentication details must be configured. 
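The Snowflake receiver entry above is a bare field list with no example. A minimal sketch, assuming the component id `snowflake` and placeholder credentials, might look like:

```yaml
receivers:
  snowflake:
    username: snowflakeuser      # placeholder
    password: securepassword     # placeholder
    account: snowflakeaccount    # placeholder
    warehouse: warehousename     # placeholder
    collection_interval: 5m
    metrics:
      snowflake.database.query.count:
        enabled: true
```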
If connecting to a broker other than localhost, the `broker` field should be configured.\n```yaml\nreceivers:\n solace:\n broker: [localhost:5671]\n auth:\n sasl_plain:\n username: otel\n password: otel01$\n queue: queue://#telemetry-profile123\n\nservice:\n pipelines:\n traces:\n receivers: [solace]\n```\n\n## Configuration\nThe configuration parameters are:\n\n- broker (Solace broker using amqp over tls; optional; default: localhost:5671; format: ip(host):port)\n- queue (The name of the Solace queue to get span trace messages from; required; format: `queue://#telemetry-myTelemetryProfile`)\n- max_unacknowledged (The maximum number of unacknowledged messages the Solace broker can transmit; optional; default: 10)\n- tls (Advanced tls configuration, secure by default)\n - insecure (The switch from 'amqps' to 'amqp' to disable tls; optional; default: false)\n - server_name_override (Server name is the value of the Server Name Indication extension sent by the client; optional; default: empty string)\n - insecure_skip_verify (Disables server certificate validation; optional; default: false)\n - ca_file (Path to the User specified trust-store; used for a client to verify the server certificate; if empty uses system root CA; optional, default: empty string)\n - cert_file (Path to the TLS cert for client cert authentication, it is required when authentication sasl_external is chosen; non optional for sasl_external authentication)\n - key_file (Path to the TLS key for client cert authentication, it is required when authentication sasl_external is chosen; non optional for sasl_external authentication)\n- auth (Authentication settings. Permitted sub-configurations: sasl_plain, sasl_xauth2, sasl_external)\n - sasl_plain (Enables SASL PLAIN authentication)\n - username (The username to use, required for sasl_plain authentication)\n - password (The password to use; required for sasl_plain authentication)\n - sasl_xauth2 (SASL XOauth2 authentication)\n - username (The username to use; required for sasl_xauth2 authentication)\n - bearer (The bearer token in plain text; required for sasl_xauth2 authentication)\n - sasl_external (SASL External required to be used for TLS client cert authentication. When this authentication type is chosen then tls cert_file and key_file are required)\n- flow_control (Configures the behaviour to use when temporary errors are encountered from the next component)\n - delayed_retry (Default flow control strategy. Sets the flow control strategy to delayed retry which will wait before trying to push the message to the next component again)\n - delay (The delay, e.g. 10ms, to wait before retrying. 
Default is 10ms)\n\n### Examples:\nSimple single node configuration with SASL plain authentication (TLS enabled by default)\n\n```yaml\nreceivers:\n solace:\n broker: [localhost:5671]\n auth:\n sasl_plain:\n username: otel\n password: otel01$\n queue: queue://#telemetry-profile123\n\nservice:\n pipelines:\n traces:\n receivers: [solace]\n```\n\nHigh availability setup with SASL plain authentication (TLS enabled by default)\n```yaml\nreceivers:\n solace/primary:\n broker: [myHost-primary:5671]\n auth:\n sasl_plain:\n username: otel\n password: otel01$\n queue: queue://#telemetry-profile123\n\n solace/backup:\n broker: [myHost-backup:5671]\n auth:\n sasl_plain:\n username: otel\n password: otel01$\n queue: queue://#telemetry-profile123\n\nservice:\n pipelines:\n traces/solace:\n receivers: [solace/primary,solace/backup]\n```","properties":{"auth":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.Authentication","title":"auth"},"broker":{"description":"The list of solace brokers (default localhost:5671)","items":{"type":"string"},"title":"broker","type":"array"},"flow_control":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.FlowControl","title":"flow_control"},"max_unacknowledged":{"description":"The maximum number of unacknowledged messages the Solace broker can transmit, to configure AMQP Link","title":"max_unacknowledged","type":"integer"},"queue":{"description":"The name of the solace queue to consume from, it is required parameter","title":"queue","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.FlowControl":{"additionalProperties":false,"description":"FlowControl defines the configuration for what to do in backpressure scenarios, e.g.","properties":{"delayed_retry":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.FlowControlDelayedRetry","title":"delayed_retry"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.FlowControlDelayedRetry":{"additionalProperties":false,"description":"FlowControlDelayedRetry represents the strategy of waiting for a defined amount of time (in time.Duration) and attempt redelivery","properties":{"delay":{"title":"delay","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.SaslExternalConfig":{"additionalProperties":false,"description":"SaslExternalConfig defines the configuration for the SASL External used in conjunction with TLS client authentication.","properties":{},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.SaslPlainTextConfig":{"additionalProperties":false,"description":"SaslPlainTextConfig defines SASL PLAIN authentication.","properties":{"password":{"title":"password","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.SaslXAuth2Config":{"additionalProperties":false,"description":"SaslXAuth2Config defines the configuration for the SASL XAUTH2 
authentication.","properties":{"bearer":{"title":"bearer","type":"string"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.splunkhecreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the Splunk HEC receiver.","properties":{"access_token_passthrough":{"description":"AccessTokenPassthrough indicates whether to associate datapoints with an organization access token received in request.","title":"access_token_passthrough","type":"boolean"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"health_path":{"description":"HealthPath for health API, default is '/services/collector/health'","title":"health_path","type":"string"},"hec_metadata_to_otel_attrs":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.splunk.HecToOtelAttrs","description":"HecToOtelAttrs creates a mapping from HEC metadata to attributes.","title":"hec_metadata_to_otel_attrs"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"raw_path":{"description":"RawPath for raw data collection, default is '/services/collector/raw'","title":"raw_path","type":"string"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"splitting":{"description":"Splitting defines the splitting strategy used by the receiver when ingesting raw events. Can be set to \"line\" or \"none\". 
Default is \"line\".","title":"splitting","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.Config":{"additionalProperties":false,"markdownDescription":"# SQL Query Receiver (Alpha)\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| | [development]: logs |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe SQL Query Receiver uses custom SQL queries to generate metrics from a database connection.\n\n\u003e :construction: This receiver is in **ALPHA**. Behavior, configuration fields, and metric data model are subject to\n\u003e change.\n\n## Configuration\n\nThe configuration supports the following top-level fields:\n\n- `driver`(required): The name of the database driver: one of _postgres_, _mysql_, _snowflake_, _sqlserver_, _hdb_ (SAP\n HANA), or _oracle_ (Oracle DB).\n- `datasource`(required): The datasource value passed to [sql.Open](https://pkg.go.dev/database/sql#Open). This is\n a driver-specific string usually consisting of at least a database name and connection information. This is sometimes\n referred to as the \"connection string\" in driver documentation.\n e.g. _host=localhost port=5432 user=me password=s3cr3t sslmode=disable_\n- `queries`(required): A list of queries, where a query is a sql statement and one or more `logs` and/or `metrics` sections (details below).\n- `collection_interval`(optional): The time interval between query executions. Defaults to _10s_.\n- `storage` (optional, default `\"\"`): The ID of a [storage][storage_extension] extension to be used to [track processed results](#tracking-processed-results).\n\n[storage_extension]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage\n\n### Queries\n\nA _query_ consists of a sql statement and one or more `logs` and/or `metrics` section.\nAt least one `logs` or one `metrics` section is required.\nNote that technically you can put both `logs` and `metrics` sections in a single query section,\nbut it's probably not a real world use case, as the requirements for logs and metrics queries\nare quite different.\n\nAdditionally, each `query` section supports the following properties:\n\n- `tracking_column` (optional, default `\"\"`) Applies only to logs. In case of a parameterized query,\n defines the column to retrieve the value of the parameter on subsequent query runs.\n See the below section [Tracking processed results](#tracking-processed-results).\n- `tracking_start_value` (optional, default `\"\"`) Applies only to logs. 
In case of a parameterized query, defines the initial value for the parameter.\n See the below section [Tracking processed results](#tracking-processed-results).\n\nExample:\n\n```yaml\nreceivers:\n sqlquery:\n driver: postgres\n datasource: \"host=localhost port=5432 user=postgres password=s3cr3t sslmode=disable\"\n queries:\n - sql: \"select * from my_logs where log_id \u003e $$1\"\n tracking_start_value: \"10000\"\n tracking_column: log_id\n logs:\n - body_column: log_body\n - sql: \"select count(*) as count, genre from movie group by genre\"\n metrics:\n - metric_name: movie.genres\n value_column: \"count\"\n```\n\n#### Logs Queries\n\nThe `logs` section is in development.\n\n- `body_column` (required) defines the column to use as the log record's body.\n\n##### Tracking processed results\n\nWith the default configuration and a non-parameterized logs query like `select * from my_logs`,\nthe receiver will run the same query every collection interval, which can cause reading the same rows\nover and over again, unless there's an external actor removing the old rows from the `my_logs` table.\n\nTo prevent reading the same rows on every collection interval, use a parameterized query like `select * from my_logs where id_column \u003e ?`,\ntogether with the `tracking_start_value` and `tracking_column` configuration properties.\nThe receiver will use the configured `tracking_start_value` as the value for the query parameter when running the query for the first time.\nAfter each query run, the receiver will store the value of the `tracking_column` from the last row of the result set and use it as the value for the query parameter on the next collection interval. To prevent duplicate log downloads, make sure to sort the query results in ascending order by the tracking_column value.\n\nNote that the notation for the parameter depends on the database backend. For example, in MySQL this is `?`, in PostgreSQL this is `$1`, in Oracle this is any string identifier starting with a colon `:`, for example `:my_parameter`.\n\nUse the `storage` configuration property of the receiver to persist the tracking value across collector restarts.\n\n#### Metrics queries\n\nEach `metrics` section consists of a\n`metric_name`, a `value_column`, and additional optional fields.\nEach _metric_ in the configuration will produce one OTel metric per row returned from its sql query.\n\n- `metric_name`(required): the name assigned to the OTel metric.\n- `value_column`(required): the column name in the returned dataset used to set the value of the metric's datapoint.\n This may be case-sensitive, depending on the driver (e.g. Oracle DB).\n- `attribute_columns`(optional): a list of column names in the returned dataset used to set attributes on the datapoint.\n These attributes may be case-sensitive, depending on the driver (e.g. Oracle DB).\n- `data_type` (optional): can be `gauge` or `sum`; defaults to `gauge`.\n- `value_type` (optional): can be `int` or `double`; defaults to `int`.\n- `monotonic` (optional): boolean; whether a cumulative sum's value is monotonically increasing (i.e. 
never rolls over\n or resets); defaults to false.\n- `aggregation` (optional): only applicable for `data_type=sum`; can be `cumulative` or `delta`; defaults\n to `cumulative`.\n- `description` (optional): the description applied to the metric.\n- `unit` (optional): the units applied to the metric.\n- `static_attributes` (optional): static attributes applied to the metrics.\n- `start_ts_column` (optional): the name of the column containing the start timestamp, the value of which is applied to \n the metric's start timestamp (otherwise the current time is used). Only applies if the metric is of type cumulative \n sum.\n- `ts_column` (optional): the name of the column containing the timestamp, the value of which is applied to the \n metric's timestamp. This can be current timestamp depending upon the time of last recorded metric's datapoint.\n\n### Example\n\n```yaml\nreceivers:\n sqlquery:\n driver: postgres\n datasource: \"host=localhost port=5432 user=postgres password=s3cr3t sslmode=disable\"\n storage: file_storage\n queries:\n - sql: \"select * from my_logs where log_id \u003e $$1\"\n tracking_start_value: \"10000\"\n tracking_column: log_id\n logs:\n - body_column: log_body\n - sql: \"select count(*) as count, genre from movie group by genre\"\n metrics:\n - metric_name: movie.genres\n value_column: \"count\"\n attribute_columns: [\"genre\"]\n static_attributes:\n dbinstance: mydbinstance\n```\n\nGiven a `movie` table with three rows:\n\n| name | genre |\n| --------- | ------ |\n| E.T. | sci-fi |\n| Star Wars | sci-fi |\n| Die Hard | action |\n\nIf there are two rows returned from the query `select count(*) as count, genre from movie group by genre`:\n\n| count | genre |\n| ----- | ------ |\n| 2 | sci-fi |\n| 1 | action |\n\nthen the above config will produce two metrics at each collection interval:\n\n```\nMetric #0\nDescriptor:\n -\u003e Name: movie.genres\n -\u003e DataType: Gauge\nNumberDataPoints #0\nData point attributes:\n -\u003e genre: STRING(sci-fi)\n -\u003e dbinstance: STRING(mydbinstance)\nValue: 2\n\nMetric #1\nDescriptor:\n -\u003e Name: movie.genres\n -\u003e DataType: Gauge\nNumberDataPoints #0\nData point attributes:\n -\u003e genre: STRING(action)\n -\u003e dbinstance: STRING(mydbinstance)\nValue: 1\n```\n\n#### NULL values\n\nAvoid queries that produce any NULL values. If a query produces a NULL value, a warning will be logged. Furthermore,\nif a configuration references the column that produces a NULL value, an additional error will be logged. 
However, in\neither case, the receiver will continue to operate.\n\n#### Oracle DB Driver Example\n\nRefer to the config file [provided](./testdata/oracledb-receiver-config.yaml) for an example of using the\nOracle DB driver to connect and query the same table schema and contents as the example above.\nThe Oracle DB driver documentation can be found [here.](https://github.com/sijms/go-ora)\nAnother usage example is the `go_ora`\nexample [here.](https://blogs.oracle.com/developers/post/connecting-a-go-application-to-oracle-database)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"datasource":{"title":"datasource","type":"string"},"driver":{"title":"driver","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"queries":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.Query"},"title":"queries","type":"array"},"storage":{"title":"storage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.LogsCfg":{"additionalProperties":false,"properties":{"body_column":{"title":"body_column","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.MetricCfg":{"additionalProperties":false,"properties":{"aggregation":{"title":"aggregation","type":"string"},"attribute_columns":{"items":{"type":"string"},"title":"attribute_columns","type":"array"},"data_type":{"title":"data_type","type":"string"},"description":{"title":"description","type":"string"},"metric_name":{"title":"metric_name","type":"string"},"monotonic":{"title":"monotonic","type":"boolean"},"start_ts_column":{"title":"start_ts_column","type":"string"},"static_attributes":{"patternProperties":{".*":{"type":"string"}},"title":"static_attributes","type":"object"},"ts_column":{"title":"ts_column","type":"string"},"unit":{"title":"unit","type":"string"},"value_column":{"title":"value_column","type":"string"},"value_type":{"title":"value_type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.Query":{"additionalProperties":false,"properties":{"logs":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.LogsCfg"},"title":"logs","type":"array"},"metrics":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.MetricCfg"},"title":"metrics","type":"array"},"sql":{"title":"sql","type":"string"},"tracking_column":{"title":"tracking_column","type":"string"},"tracking_start_value":{"title":"tracking_start_value","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for a sqlserver receiver.","markdownDescription":"# Microsoft SQL Server Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe 
`sqlserver` receiver grabs metrics about a Microsoft SQL Server instance using the Windows Performance Counters.\nBecause of this, it is a Windows-only receiver.\n\n## Configuration\n\nThe following settings are optional:\n- `collection_interval` (default = `10s`): The interval at which metrics should be emitted by this receiver.\n\nTo collect from a SQL Server with a named instance, both `computer_name` and `instance_name` are required. For a default SQL Server setup, these settings are optional.\n- `computer_name` (optional): The computer name identifies the SQL Server name or IP address of the computer being monitored.\n- `instance_name` (optional): The instance name identifies the specific SQL Server instance being monitored.\n\nExample:\n\n```yaml\n receivers:\n sqlserver:\n collection_interval: 10s\n```\n\nWhen a named instance is used, a computer name and an instance name must be specified.\nExample with named instance:\n\n```yaml\n receivers:\n sqlserver:\n collection_interval: 10s\n computer_name: CustomServer\n instance_name: CustomInstance\n resource_attributes:\n sqlserver.computer.name:\n enabled: true\n sqlserver.instance.name:\n enabled: true\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [documentation.md](./documentation.md)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"computer_name":{"title":"computer_name","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"instance_name":{"title":"instance_name","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricsConfig","title":"metrics"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for sqlserver 
metrics.","properties":{"sqlserver.batch.request.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.batch.request.rate"},"sqlserver.batch.sql_compilation.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.batch.sql_compilation.rate"},"sqlserver.batch.sql_recompilation.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.batch.sql_recompilation.rate"},"sqlserver.lock.wait.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.lock.wait.rate"},"sqlserver.lock.wait_time.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.lock.wait_time.avg"},"sqlserver.page.buffer_cache.hit_ratio":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.page.buffer_cache.hit_ratio"},"sqlserver.page.checkpoint.flush.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.page.checkpoint.flush.rate"},"sqlserver.page.lazy_write.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.page.lazy_write.rate"},"sqlserver.page.life_expectancy":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.page.life_expectancy"},"sqlserver.page.operation.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.page.operation.rate"},"sqlserver.page.split.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.page.split.rate"},"sqlserver.transaction.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction.rate"},"sqlserver.transaction.write.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction.write.rate"},"sqlserver.transaction_log.flush.data.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction_log.flush.data.rate"},"sqlserver.transaction_log.flush.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction_log.flush.rate"},"sqlserver.transaction_log.flush.wait.rate":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction_log.flush.wait.rate"},"sqlserver.transaction_log.growth.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contr
ib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction_log.growth.count"},"sqlserver.transaction_log.shrink.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction_log.shrink.count"},"sqlserver.transaction_log.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.transaction_log.usage"},"sqlserver.user.connection.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.MetricConfig","title":"sqlserver.user.connection.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for sqlserver resource attributes.","properties":{"sqlserver.computer.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.ResourceAttributeConfig","title":"sqlserver.computer.name"},"sqlserver.database.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.ResourceAttributeConfig","title":"sqlserver.database.name"},"sqlserver.instance.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.internal.metadata.ResourceAttributeConfig","title":"sqlserver.instance.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.Config":{"additionalProperties":false,"markdownDescription":"# SSH Check Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver creates stats by connecting to an SSH server which may be an SFTP server.\n\n## Prerequisites\n\nIf `ignore_host_key` is not set then host key validation requires the agent either have a known_hosts file at a path specified by setting `known_hosts` or at default paths indicated by ssh man pages: $HOME/.ssh/known_hosts or /etc/ssh/known_hosts.\n\n## Configuration\n\nThe following settings are required:\n- `endpoint`\n- `username`\n- `password` or `keyfile`\n\nEither `password` or `keyfile` must be set. But if both are set then password is treated as the `passphrase` and the key is assumed to be encrypted.\n\nThe following settings are optional:\n\n- `collection_interval` (default = `60s`): This receiver collects metrics on an interval. 
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.\n- `known_hosts` (default = ssh defaults): The path to the known_hosts file. If this isn't set then default locations are checked at `$HOME/.ssh/known_hosts` and `/etc/ssh/known_hosts`.\n- `ignore_host_key` (default = false): Can override conventional ssh security for use cases like tests where authentication via the known_hosts file isn't required.\n\n### Example Configuration\n\n```yaml\nreceivers:\n sshcheck:\n endpoint: localhost:2222\n username: otelu\n password: $OTELP\n collection_interval: 60s\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). \n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml)","properties":{"check_sftp":{"title":"check_sftp","type":"boolean"},"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"Endpoint is always required","title":"endpoint","type":"string"},"ignore_host_key":{"description":"IgnoreHostKey provides an insecure path to quickstarts and testing","title":"ignore_host_key","type":"boolean"},"initial_delay":{"title":"initial_delay","type":"string"},"key_file":{"title":"key_file","type":"string"},"known_hosts":{"description":"file path to the known_hosts","title":"known_hosts","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"title":"timeout","type":"string"},"username":{"description":"authentication requires a Username and either a Password or KeyFile","title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for sshcheck 
metrics.","properties":{"sshcheck.duration":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig","title":"sshcheck.duration"},"sshcheck.error":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig","title":"sshcheck.error"},"sshcheck.sftp_duration":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig","title":"sshcheck.sftp_duration"},"sshcheck.sftp_error":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig","title":"sshcheck.sftp_error"},"sshcheck.sftp_status":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig","title":"sshcheck.sftp_status"},"sshcheck.status":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.MetricConfig","title":"sshcheck.status"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for sshcheck resource attributes.","properties":{"ssh.endpoint":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.internal.metadata.ResourceAttributeConfig","title":"ssh.endpoint"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.statsdreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for StatsD receiver.","markdownDescription":"# StatsD Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [aws], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\n\nStatsD receiver for ingesting StatsD messages(https://github.com/statsd/statsd/blob/master/docs/metric_types.md) into the OpenTelemetry Collector.\n\nUse case: it does not support horizontal pool of collectors. 
The desired deployment is to run the receiver as an agent with a single input at a time.\n\n## Configuration\n\nThe following settings are required:\n\n- `endpoint` (default = `localhost:8125`): Address and port to listen on.\n\n\nThe following settings are optional:\n\n- `aggregation_interval: 70s`(default value is 60s): The interval during which the receiver aggregates metrics (similar to the flush interval in a StatsD server)\n\n- `enable_metric_type: true`(default value is false): Enable the statsd receiver to emit the metric type (gauge, counter, timer (in the future), histogram (in the future)) as a label.\n\n- `is_monotonic_counter` (default value is false): Set all counter-type metrics the statsd receiver receives as monotonic.\n\n- `timer_histogram_mapping:`(default value is below): Specify what OTLP type to convert received timing/histogram data to.\n\n\n`\"statsd_type\"` specifies received Statsd data type. Possible values for this setting are `\"timing\"`, `\"timer\"` and `\"histogram\"`.\n\n`\"observer_type\"` specifies the OTLP data type to convert to. We support `\"gauge\"`, `\"summary\"`, and `\"histogram\"`. For `\"gauge\"`, it does not perform any aggregation.\nFor `\"summary\"`, the statsD receiver will aggregate to one OTLP summary metric for one metric description (the same metric name with the same tags). It will send percentiles 0, 10, 50, 90, 95, and 100 to the downstream. The `\"histogram\"` setting selects an [auto-scaling exponential histogram configured with only a maximum size](https://github.com/lightstep/go-expohisto#readme), as shown in the example below.\nTODO: Add a new option to use a smoothed summary like Prometheus: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/3261 \n\nExample:\n\n```yaml\nreceivers:\n statsd:\n statsd/2:\n endpoint: \"localhost:8127\"\n aggregation_interval: 70s\n enable_metric_type: true\n is_monotonic_counter: false\n timer_histogram_mapping:\n - statsd_type: \"histogram\"\n observer_type: \"gauge\"\n - statsd_type: \"timing\"\n observer_type: \"histogram\"\n histogram: \n max_size: 100\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Aggregation\n\nAggregation is done in the statsD receiver. The default aggregation interval is 60s. The receiver only aggregates the metrics with the same metric name, metric type, label keys and label values. After each aggregation interval, the receiver will send all metrics (after aggregation) in this aggregation interval to the following workflow.\n\nIt supports:\nCounter(transferred to int):\n- statsdTestMetric1:3000|c|#mykey:myvalue\nstatsdTestMetric1:4000|c|#mykey:myvalue\n(get the value after incrementation: 7000)\n- statsdTestMetric1:3000|c|#mykey:myvalue\nstatsdTestMetric1:20|c|@0.25|#mykey:myvalue\n(get the value after incrementation with sample rate: 3000+20/0.25=3080)\n\nWhen the receiver receives a valid sample rate (greater than 0 and less than 1), we convert the count value to float, divide by the sample rate, and then convert back to integer.\n\nThe official [doc](https://github.com/statsd/statsd/blob/master/docs/metric_types.md#counting) does not support negative counters, and we follow this pattern at this time. There are some requests for negative counters; we need to take a look if we want to support them later. 
For example:\nhttps://github.com/influxdata/telegraf/issues/1898\nhttps://thenewstack.io/collecting-metrics-using-statsd-a-standard-for-real-time-monitoring/\nhttps://docs.datadoghq.com/developers/metrics/dogstatsd_metrics_submission/#count\n\nGauge(transferred to double):\n- statsdTestMetric1:500|g|#mykey:myvalue\nstatsdTestMetric1:400|g|#mykey:myvalue\n(get the latest value: 400)\n- statsdTestMetric1:500|g|#mykey:myvalue\nstatsdTestMetric1:+2|g|#mykey:myvalue\nstatsdTestMetric1:-1|g|#mykey:myvalue\n(get the value after calculation: 501)\n\n## Metrics\n\nGeneral format is:\n\n`\u003cname\u003e:\u003cvalue\u003e|\u003ctype\u003e|@\u003csample-rate\u003e|#\u003ctag1-key\u003e:\u003ctag1-value\u003e,\u003ctag2-k/v\u003e`\n\n### Counter\n\n`\u003cname\u003e:\u003cvalue\u003e|c|@\u003csample-rate\u003e|#\u003ctag1-key\u003e:\u003ctag1-value\u003e`\n\nIt supports sample rate.\nTODO: Need to change the implementation part for sample rate after OTLP supports sample rate as a parameter later.\n\n\n### Gauge\n\n`\u003cname\u003e:\u003cvalue\u003e|g|@\u003csample-rate\u003e|#\u003ctag1-key\u003e:\u003ctag1-value\u003e`\n\n\n### Timer\n\n`\u003cname\u003e:\u003cvalue\u003e|ms|@\u003csample-rate\u003e|#\u003ctag1-key\u003e:\u003ctag1-value\u003e`\n`\u003cname\u003e:\u003cvalue\u003e|h|@\u003csample-rate\u003e|#\u003ctag1-key\u003e:\u003ctag1-value\u003e`\n\nIt supports sample rate.\n\n\n## Testing\n\n### Full sample collector config\n\n```yaml\nreceivers:\n statsd:\n endpoint: \"localhost:8125\" # default\n aggregation_interval: 60s # default\n enable_metric_type: false # default\n is_monotonic_counter: false # default\n timer_histogram_mapping:\n - statsd_type: \"histogram\"\n observer_type: \"histogram\"\n histogram:\n max_size: 50\n - statsd_type: \"timing\"\n observer_type: \"summary\"\n\nexporters:\n file:\n path: ./test.json\n\nservice:\n pipelines:\n metrics:\n receivers: [statsd]\n exporters: [file]\n```\n\n### Send StatsD message into the receiver\n\nA simple way to send a metric to `localhost:8125`:\n\n`echo \"test.metric:42|c|#myKey:myVal\" | nc -w 1 -u localhost 8125`","properties":{"aggregation_interval":{"title":"aggregation_interval","type":"string"},"enable_metric_type":{"title":"enable_metric_type","type":"boolean"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"is_monotonic_counter":{"title":"is_monotonic_counter","type":"boolean"},"timer_histogram_mapping":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.statsdreceiver.protocol.TimerHistogramMapping"},"title":"timer_histogram_mapping","type":"array"},"transport":{"description":"Transport to use. 
Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.statsdreceiver.protocol.HistogramConfig":{"additionalProperties":false,"properties":{"max_size":{"title":"max_size","type":"integer"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.statsdreceiver.protocol.TimerHistogramMapping":{"additionalProperties":false,"properties":{"histogram":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.statsdreceiver.protocol.HistogramConfig","title":"histogram"},"observer_type":{"title":"observer_type","type":"string"},"statsd_type":{"title":"statsd_type","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.Config":{"additionalProperties":false,"description":"Config is the configuration of the receiver","markdownDescription":"# vCenter Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver fetches metrics from a vCenter or ESXi host running VMware vSphere APIs.\n\n## Prerequisites\n\nThis receiver has been built to support ESXi and vCenter versions:\n\n- 7.5\n- 7.0\n- 6.7\n\nA “Read Only” user assigned to a vSphere with permissions to the vCenter server, cluster and all subsequent resources being monitored must be specified in order for the receiver to retrieve information about them.\n\n## Configuration\n\n\n| Parameter | Default | Type | Notes |\n| ------------------- | ------- | ---------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| endpoint | | String | Endpoint to the vCenter Server or ESXi host that has the sdk path enabled. Required. The expected format is `\u003cprotocol\u003e://\u003chostname\u003e` \u003cbr\u003e\u003cbr\u003e i.e: `https://vcsa.hostname.localnet` |\n| username | | String | Required |\n| password | | String | Required |\n| tls | | TLSClientSetting | Not Required. Will use defaults for [configtls.TLSClientSetting](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md). By default insecure settings are rejected and certificate verification is on. |\n| collection_interval | 2m | Duration | This receiver collects metrics on an interval. If the vCenter is fairly large, this value may need to be increased. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h` |\n| initial_delay | 1s | Duration | Defines how long this receiver waits before starting. 
|\n\n### Example Configuration\n\n```yaml\nreceivers:\n vcenter:\n endpoint: http://localhost:15672\n username: otelu\n password: ${env:VCENTER_PASSWORD}\n collection_interval: 5m\n initial_delay: 1s\n metrics: []\n```\n\nThe full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md).\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml) with further documentation in [documentation.md](./documentation.md)","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricsConfig","title":"metrics"},"password":{"title":"password","type":"string"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","title":"tls"},"username":{"title":"username","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for vcenter 
metrics.","properties":{"vcenter.cluster.cpu.effective":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.cpu.effective"},"vcenter.cluster.cpu.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.cpu.limit"},"vcenter.cluster.host.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.host.count"},"vcenter.cluster.memory.effective":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.memory.effective"},"vcenter.cluster.memory.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.memory.limit"},"vcenter.cluster.memory.used":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.memory.used"},"vcenter.cluster.vm.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.cluster.vm.count"},"vcenter.datastore.disk.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.datastore.disk.usage"},"vcenter.datastore.disk.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.datastore.disk.utilization"},"vcenter.host.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.cpu.usage"},"vcenter.host.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.cpu.utilization"},"vcenter.host.disk.latency.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.disk.latency.avg"},"vcenter.host.disk.latency.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.disk.latency.max"},"vcenter.host.disk.throughput":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.disk.throughput"},"vcenter.host.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.memory.usage"},"vcenter.host.memory.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.memory.utilization"},"vcenter.host.network.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.network.packet.count"},"vcenter.host.network.packet.errors":{"$ref":"#/$defs/github.com.open-teleme
try.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.network.packet.errors"},"vcenter.host.network.throughput":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.network.throughput"},"vcenter.host.network.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.host.network.usage"},"vcenter.resource_pool.cpu.shares":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.resource_pool.cpu.shares"},"vcenter.resource_pool.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.resource_pool.cpu.usage"},"vcenter.resource_pool.memory.shares":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.resource_pool.memory.shares"},"vcenter.resource_pool.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.resource_pool.memory.usage"},"vcenter.vm.cpu.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.cpu.usage"},"vcenter.vm.cpu.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.cpu.utilization"},"vcenter.vm.disk.latency.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.disk.latency.avg"},"vcenter.vm.disk.latency.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.disk.latency.max"},"vcenter.vm.disk.throughput":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.disk.throughput"},"vcenter.vm.disk.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.disk.usage"},"vcenter.vm.disk.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.disk.utilization"},"vcenter.vm.memory.ballooned":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.memory.ballooned"},"vcenter.vm.memory.swapped":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.memory.swapped"},"vcenter.vm.memory.swapped_ssd":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.memory.swapped_ssd"},"vcenter.vm.memory.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.v
m.memory.usage"},"vcenter.vm.memory.utilization":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.memory.utilization"},"vcenter.vm.network.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.network.packet.count"},"vcenter.vm.network.throughput":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.network.throughput"},"vcenter.vm.network.usage":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.MetricConfig","title":"vcenter.vm.network.usage"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for vcenter resource attributes.","properties":{"vcenter.cluster.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig","title":"vcenter.cluster.name"},"vcenter.datastore.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig","title":"vcenter.datastore.name"},"vcenter.host.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig","title":"vcenter.host.name"},"vcenter.resource_pool.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig","title":"vcenter.resource_pool.name"},"vcenter.vm.id":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig","title":"vcenter.vm.id"},"vcenter.vm.name":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.internal.metadata.ResourceAttributeConfig","title":"vcenter.vm.name"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.wavefrontreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for the Wavefront receiver.","properties":{"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"extract_collectd_tags":{"description":"ExtractCollectdTags instructs the Wavefront receiver to attempt to extract\ntags in the CollectD format from the metric name. 
The default is false.","title":"extract_collectd_tags","type":"boolean"},"tcp_idle_timeout":{"description":"TCPIdleTimeout is the timout for idle TCP connections.","title":"tcp_idle_timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowseventlogreceiver.WindowsLogConfig":{"additionalProperties":false,"description":"WindowsLogConfig defines configuration for the windowseventlog receiver","markdownDescription":"## Windows Log Event Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [alpha]: logs |\n| Distributions | [observiq], [splunk], [sumo] |\n\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nTails and parses logs from windows event log API using the [opentelemetry-log-collection](https://github.com/open-telemetry/opentelemetry-log-collection) library.\n\n### Configuration Fields\n\n| Field | Default | Description |\n|-------------------------------------|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `channel` | required | The windows event log channel to monitor |\n| `max_reads` | 100 | The maximum number of records read into memory, before beginning a new batch |\n| `start_at` | `end` | On first startup, where to start reading logs from the API. Options are `beginning` or `end` |\n| `poll_interval` | 1s | The interval at which the channel is checked for new log entries. This check begins again after all new bodies have been read. |\n| `attributes` | {} | A map of `key: value` pairs to add to the entry's attributes. |\n| `resource` | {} | A map of `key: value` pairs to add to the entry's resource. |\n| `operators` | [] | An array of [operators](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/README.md#what-operators-are-available). See below for more details |\n| `raw` | false | If true, the windows events are not processed and sent as XML. |\n| `storage` | none | The ID of a storage extension to be used to store bookmarks. Bookmarks allow the receiver to pick up where it left off in the case of a collector restart. If no storage extension is used, the receiver will manage bookmarks in memory only. |\n| `retry_on_failure.enabled` | `false` | If `true`, the receiver will pause reading a file and attempt to resend the current batch of logs if it encounters an error from downstream components. |\n| `retry_on_failure.initial_interval` | `1 second` | Time to wait after the first failure before retrying. |\n| `retry_on_failure.max_interval` | `30 seconds` | Upper bound on retry backoff interval. Once this value is reached the delay between consecutive retries will remain constant at the specified value. |\n| `retry_on_failure.max_elapsed_time` | `5 minutes` | Maximum amount of time (including retries) spent trying to send a logs batch to a downstream consumer. Once this value is reached, the data is discarded. Retrying never stops if set to `0`. |\n\n### Operators\n\nEach operator performs a simple responsibility, such as parsing a timestamp or JSON. 
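Before the operator details below, it may help to see the receiver-level fields from the configuration table above combined in one place. This is only an illustrative sketch, not a recommended configuration: the values shown are examples, and the `file_storage` extension ID is an assumption that must correspond to a storage extension actually configured in your collector.

```yaml
receivers:
  windowseventlog:
    channel: application      # required: the Windows event log channel to monitor
    start_at: beginning       # default is `end`
    poll_interval: 1s         # default
    max_reads: 100            # default
    raw: false                # default; set true to forward events as unprocessed XML
    storage: file_storage     # assumed extension ID; lets bookmarks survive restarts
    retry_on_failure:
      enabled: true           # default is false
      initial_interval: 1s
      max_interval: 30s
```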
Chain together operators to process logs into a desired format.\n\n- Every operator has a `type`.\n- Every operator can be given a unique `id`. If you use the same type of operator more than once in a pipeline, you must specify an `id`. Otherwise, the `id` defaults to the value of `type`.\n- Operators will output to the next operator in the pipeline. The last operator in the pipeline will emit from the receiver. Optionally, the `output` parameter can be used to specify the `id` of another operator to which logs will be passed directly.\n- Only parsers and general purpose operators should be used.\n\n## Additional Terminology and Features\n\n- An [entry](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/types/entry.md) is the base representation of log data as it moves through a pipeline. All operators either create, modify, or consume entries.\n- A [field](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/types/field.md) is used to reference values in an entry.\n- A common [expression](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/types/expression.md) syntax is used in several operators. For example, expressions can be used to [filter](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/filter.md) or [route](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/router.md) entries.\n- [timestamp](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/types/timestamp.md) parsing is available as a block within all parser operators, and also as a standalone operator. Many common timestamp layouts are supported.\n- [severity](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/types/severity.md) parsing is available as a block within all parser operators, and also as a standalone operator. 
Stanza uses a flexible severity representation which is automatically interpreted by the stanza receiver.\n\n### Example Configurations\n\n#### Simple\n\nConfiguration:\n```yaml\nreceivers:\n windowseventlog:\n channel: application\n```\n\nOutput entry sample:\n```json\n{\n \"channel\": \"Application\",\n \"computer\": \"computer name\",\n \"event_id\":\n {\n \"id\": 10,\n \"qualifiers\": 0\n },\n \"keywords\": \"[Classic]\",\n \"level\": \"Information\",\n \"message\": \"Test log\",\n \"opcode\": \"Info\",\n \"provider\":\n {\n \"event_source\": \"\",\n \"guid\": \"\",\n \"name\": \"otel\"\n },\n \"record_id\": 12345,\n \"system_time\": \"2022-04-15T15:28:08.898974100Z\",\n \"task\": \"\"\n}\n```","properties":{"operators":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.pkg.stanza.operator.Config"},"title":"operators","type":"array"},"retry_on_failure":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.internal.coreinternal.consumerretry.Config","title":"retry_on_failure"},"storage":{"title":"storage","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for WindowsPerfCounters receiver.","markdownDescription":"# Windows Performance Counters Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: metrics |\n| Distributions | [contrib], [observiq], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver, for Windows only, captures the configured system, application, or\ncustom performance counter data from the Windows registry using the [PDH\ninterface](https://docs.microsoft.com/en-us/windows/win32/perfctrs/using-the-pdh-functions-to-consume-counter-data).\nIt is based on the [Telegraf Windows Performance Counters Input\nPlugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters).\n\n- `Memory\\Committed Bytes`\n- `Processor\\% Processor Time`, with a datapoint for each `Instance` label = (`_Total`, `1`, `2`, `3`, ... )\n\nIf one of the specified performance counters cannot be loaded on startup, a\nwarning will be printed, but the application will not fail fast. 
It is expected\nthat some performance counters may not exist on some systems due to different OS\nconfiguration.\n\n## Configuration\n\nThe collection interval and the list of performance counters to be scraped can\nbe configured:\n\n```yaml\nwindowsperfcounters:\n collection_interval: \u003cduration\u003e # default = \"1m\"\n initial_delay: \u003cduration\u003e # default = \"1s\"\n metrics:\n \u003cmetric name\u003e:\n description: \u003cdescription\u003e\n unit: \u003cunit type\u003e\n gauge:\n \u003cmetric name\u003e:\n description: \u003cdescription\u003e\n unit: \u003cunit type\u003e\n sum:\n aggregation: \u003ccumulative or delta\u003e\n monotonic: \u003ctrue or false\u003e\n perfcounters:\n - object: \u003cobject name\u003e\n instances: [\u003cinstance name\u003e]*\n counters:\n - name: \u003ccounter name\u003e\n metric: \u003cmetric name\u003e\n attributes:\n \u003ckey\u003e: \u003cvalue\u003e\n```\n\n*Note `instances` can have several special values depending on the type of\ncounter:\n\nValue | Interpretation\n-- | --\nNot specified | This is the only valid value if the counter has no instances\n`\"*\"` | All instances\n`\"_Total\"` | The \"total\" instance\n`\"instance1\"` | A single instance\n`[\"instance1\", \"instance2\", ...]` | A set of instances\n`[\"_Total\", \"instance1\", \"instance2\", ...]` | A set of instances including the \"total\" instance\n\n### Scraping at different frequencies\n\nIf you would like to scrape some counters at a different frequency than others,\nyou can configure multiple `windowsperfcounters` receivers with different\n`collection_interval` values. For example:\n\n```yaml\nreceivers:\n windowsperfcounters/memory:\n metrics:\n bytes.committed:\n description: the number of bytes committed to memory\n unit: By\n gauge:\n collection_interval: 30s\n perfcounters:\n - object: Memory\n counters:\n - name: Committed Bytes\n metric: bytes.committed\n\n windowsperfcounters/processor:\n collection_interval: 1m\n metrics:\n processor.time:\n description: active and idle time of the processor\n unit: \"%\"\n gauge:\n perfcounters:\n - object: \"Processor\"\n instances: \"*\"\n counters:\n - name: \"% Processor Time\"\n metric: processor.time\n attributes:\n state: active\n - object: \"Processor\"\n instances: [1, 2]\n counters:\n - name: \"% Idle Time\"\n metric: processor.time\n attributes:\n state: idle\n\nservice:\n pipelines:\n metrics:\n receivers: [windowsperfcounters/memory, windowsperfcounters/processor]\n```\n\n### Defining metric format\n\nTo report metrics in the desired output format, define a metric and reference it in the corresponding counter, along with any applicable attributes. The metric's data type can either be `gauge` (default) or `sum`. \n\n| Field Name | Description | Value | Default |\n| -- | -- | -- | -- |\n| name | The key for the metric. | string | Counter Name |\n| description | definition of what the metric measures. | string | |\n| unit | what is being measured. | string | `1` |\n| sum | representation of a sum metric. | Sum Config | |\n| gauge | representation of a gauge metric. | Gauge Config | |\n\n\n#### Sum Config\n\n| Field Name | Description | Value | Default |\n| -- | -- | -- | -- |\n| aggregation | The type of aggregation temporality for the metric. | [`cumulative` or `delta`] | |\n| monotonic | whether or not the metric value can decrease. | false | |\n\n#### Gauge Config\n\nA `gauge` config currently accepts no settings. It is specified as an object for forwards compatibility.\n\ne.g. 
To output the `Memory/Committed Bytes` counter as a metric with the name\n`bytes.committed`:\n\n```yaml\nreceivers:\n windowsperfcounters:\n metrics:\n bytes.committed:\n description: the number of bytes committed to memory\n unit: By\n gauge:\n collection_interval: 30s\n perfcounters:\n - object: Memory\n counters:\n - name: Committed Bytes\n metric: bytes.committed\n\nservice:\n pipelines:\n metrics:\n receivers: [windowsperfcounters]\n```\n\n## Known Limitation\n- The network interface is not available inside the container. Hence, the metrics for the object `Network Interface` aren't generated in that scenario. In the case of sub-process, it captures `Network Interface` metrics. There is a similar open issue in [Github](https://github.com/influxdata/telegraf/issues/5357) and [Docker](https://forums.docker.com/t/unable-to-collect-network-metrics-inside-windows-container-on-windows-server-2016-data-center/69480) forum.","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"patternProperties":{".*":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.MetricConfig"}},"title":"metrics","type":"object"},"perfcounters":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.ObjectConfig"},"title":"perfcounters","type":"array"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.CounterConfig":{"additionalProperties":false,"description":"CounterConfig defines the individual counter in an object.","properties":{"attributes":{"patternProperties":{".*":{"type":"string"}},"title":"attributes","type":"object"},"metric":{"title":"metric","type":"string"},"name":{"title":"name","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.GaugeMetric":{"additionalProperties":false,"properties":{},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.MetricConfig":{"additionalProperties":false,"description":"MetricsConfig defines the configuration for a metric to be created.","properties":{"description":{"title":"description","type":"string"},"gauge":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.GaugeMetric","title":"gauge"},"sum":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.SumMetric","title":"sum"},"unit":{"title":"unit","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.ObjectConfig":{"additionalProperties":false,"description":"ObjectConfig defines configuration for a perf counter 
object.","properties":{"counters":{"items":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.CounterConfig"},"title":"counters","type":"array"},"instances":{"items":{"type":"string"},"title":"instances","type":"array"},"object":{"title":"object","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.SumMetric":{"additionalProperties":false,"properties":{"aggregation":{"title":"aggregation","type":"string"},"monotonic":{"title":"monotonic","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zipkinreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for Zipkin receiver.","markdownDescription":"# Zipkin Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [beta]: traces |\n| Distributions | [core], [contrib], [aws], [observiq], [redhat], [splunk], [sumo] |\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[aws]: https://github.com/aws-observability/aws-otel-collector\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector\n[splunk]: https://github.com/signalfx/splunk-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThis receiver receives spans from [Zipkin](https://zipkin.io/) (V1 and V2).\n\n## Getting Started\n\nAll that is required to enable the Zipkin receiver is to include it in the\nreceiver definitions.\n\n```yaml\nreceivers:\n zipkin:\n```\n\nThe following settings are configurable:\n\n- `endpoint` (default = 0.0.0.0:9411): host:port on which the receiver is going to receive data.\n- `parse_string_tags` (default = false): if enabled, the receiver will attempt to parse string tags/binary annotations into int/bool/float.\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [HTTP server settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/confighttp/README.md#server-configuration) including CORS\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in 
bytes","title":"max_request_body_size","type":"integer"},"parse_string_tags":{"description":"If enabled the zipkin receiver will attempt to parse string tags/binary annotations into int/bool/float.\nDisabled by default","title":"parse_string_tags","type":"boolean"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.Config":{"additionalProperties":false,"markdownDescription":"# Zookeeper Receiver\n\n\u003c!-- status autogenerated section --\u003e\n| Status | |\n| ------------- |-----------|\n| Stability | [development]: metrics |\n| Distributions | [contrib], [observiq], [sumo] |\n\n[development]: https://github.com/open-telemetry/opentelemetry-collector#development\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[observiq]: https://github.com/observIQ/observiq-otel-collector\n[sumo]: https://github.com/SumoLogic/sumologic-otel-collector\n\u003c!-- end autogenerated section --\u003e\n\nThe Zookeeper receiver collects metrics from a Zookeeper instance, using the `mntr` command. The `mntr` 4 letter word command needs\nto be enabled for the receiver to be able to collect metrics.\n\n## Configuration\n\n- `endpoint`: (default = `:2181`) Endpoint to connect to collect metrics. Takes the form `host:port`.\n- `timeout`: (default = `10s`) Timeout within which requests should be completed.\n- `initial_delay` (default = `1s`): defines how long this receiver waits before starting.\n\nExample configuration.\n\n```yaml\nreceivers:\n zookeeper:\n endpoint: \"localhost:2181\"\n collection_interval: 20s\n initial_delay: 1s\n```\n\n## Metrics\n\nDetails about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml) with further documentation in [documentation.md](./documentation.md)\n\n## Limitations\n\nThis receiver does not support scraping metrics from Zookeeper's [New Metric System](https://zookeeper.apache.org/doc/r3.6.3/zookeeperMonitor.html#Metrics-System).","properties":{"collection_interval":{"title":"collection_interval","type":"string"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". 
The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"initial_delay":{"title":"initial_delay","type":"string"},"metrics":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricsConfig","title":"metrics"},"resource_attributes":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.ResourceAttributesConfig","title":"resource_attributes"},"timeout":{"description":"Timeout within which requests should be completed.","title":"timeout","type":"string"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig":{"additionalProperties":false,"description":"MetricConfig provides common config for a particular metric.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricsConfig":{"additionalProperties":false,"description":"MetricsConfig provides config for zookeeper metrics.","properties":{"zookeeper.connection.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.connection.active"},"zookeeper.data_tree.ephemeral_node.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.data_tree.ephemeral_node.count"},"zookeeper.data_tree.size":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.data_tree.size"},"zookeeper.file_descriptor.limit":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.file_descriptor.limit"},"zookeeper.file_descriptor.open":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.file_descriptor.open"},"zookeeper.follower.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.follower.count"},"zookeeper.fsync.exceeded_threshold.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.fsync.exceeded_threshold.count"},"zookeeper.latency.avg":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.latency.avg"},"zookeeper.latency.max":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.latency.max"},"zookeeper.latency.min":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.latency.min"},"zookeeper.packet.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.packet.count"},"zookeeper.request.active":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiv
er.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.request.active"},"zookeeper.ruok":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.ruok"},"zookeeper.sync.pending":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.sync.pending"},"zookeeper.watch.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.watch.count"},"zookeeper.znode.count":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.MetricConfig","title":"zookeeper.znode.count"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.ResourceAttributeConfig":{"additionalProperties":false,"description":"ResourceAttributeConfig provides common config for a particular resource attribute.","properties":{"enabled":{"title":"enabled","type":"boolean"}},"type":"object"},"github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.ResourceAttributesConfig":{"additionalProperties":false,"description":"ResourceAttributesConfig provides config for zookeeper resource attributes.","properties":{"server.state":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.ResourceAttributeConfig","title":"server.state"},"zk.version":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.internal.metadata.ResourceAttributeConfig","title":"zk.version"}},"type":"object"},"github.com.prometheus.client_golang.prometheus.Labels":{"patternProperties":{".*":{"type":"string"}},"type":"object"},"go.opentelemetry.io.collector.config.configauth.Authentication":{"additionalProperties":false,"description":"Authentication defines the auth settings for the receiver.","markdownDescription":"# Authentication configuration\n\nThis module defines necessary interfaces to implement server and client type authenticators:\n\n- Server type authenticators perform authentication for incoming HTTP/gRPC requests and are typically used in receivers.\n- Client type authenticators perform client-side authentication for outgoing HTTP/gRPC requests and are typically used in exporters.\n\nThe currently known authenticators are:\n\n- Server Authenticators\n - [oidc](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/oidcauthextension)\n\n- Client Authenticators\n - [oauth2](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/oauth2clientauthextension)\n - [BearerToken](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/bearertokenauthextension)\n\nExamples:\n```yaml\nextensions:\n oidc:\n # see the blog post on securing the otelcol for information\n # on how to setup an OIDC server and how to generate the TLS certs\n # required for this example\n # https://medium.com/opentelemetry/securing-your-opentelemetry-collector-1a4f9fa5bd6f\n issuer_url: http://localhost:8080/auth/realms/opentelemetry\n audience: account\n\n oauth2client:\n client_id: someclientid\n client_secret: someclientsecret\n token_url: https://example.com/oauth2/default/v1/token\n scopes: [\"api.metrics\"]\n # 
tls settings for the token client\n tls:\n insecure: true\n ca_file: /var/lib/mycert.pem\n cert_file: certfile\n key_file: keyfile\n # timeout for the token client\n timeout: 2s\n\nreceivers:\n otlp/with_auth:\n protocols:\n grpc:\n endpoint: localhost:4318\n tls:\n cert_file: /tmp/certs/cert.pem\n key_file: /tmp/certs/cert-key.pem\n auth:\n ## oidc is the extension name to use as the authenticator for this receiver\n authenticator: oidc\n\n otlphttp/withauth:\n endpoint: http://localhost:9000\n auth:\n authenticator: oauth2client\n\n```\n\n## Creating an authenticator\n\nNew authenticators can be added by creating a new extension that also implements the appropriate interface (`configauth.ServerAuthenticator` or `configauth.ClientAuthenticator`).\n\nGeneric authenticators that may be used by a good number of users might be accepted as part of the contrib distribution. If you have an interest in contributing an authenticator, open an issue with your proposal. For other cases, you'll need to include your custom authenticator as part of your custom OpenTelemetry Collector, perhaps being built using the [OpenTelemetry Collector Builder](https://github.com/open-telemetry/opentelemetry-collector/tree/main/cmd/builder).","properties":{"authenticator":{"description":"AuthenticatorID specifies the name of the extension to use in order to authenticate the incoming data point.","title":"authenticator","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.config.configgrpc.GRPCClientSettings":{"additionalProperties":false,"description":"GRPCClientSettings defines common settings for a gRPC client configuration.","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC gRPC. 
See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.config.configgrpc.GRPCServerSettings":{"additionalProperties":false,"description":"GRPCServerSettings defines common settings for a gRPC server configuration.","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"include_metadata":{"description":"Include propagates the incoming connection's metadata to downstream consumers.\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveServerConfig","description":"Keepalive anchor for all the settings related to keepalive.","title":"keepalive"},"max_concurrent_streams":{"description":"MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport.\nIt has effect only for streaming RPCs.","title":"max_concurrent_streams","type":"integer"},"max_recv_msg_size_mib":{"description":"MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server.","title":"max_recv_msg_size_mib","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for gRPC server. See grpc.ReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#ReadBufferSize).","title":"read_buffer_size","type":"integer"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"Configures the protocol to use TLS.\nThe default value is nil, which will cause the protocol to not use TLS.","title":"tls"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"},"write_buffer_size":{"description":"WriteBufferSize for gRPC server. See grpc.WriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig":{"additionalProperties":false,"description":"KeepaliveClientConfig exposes the keepalive.ClientParameters to be used by the exporter.","markdownDescription":"# gRPC Configuration Settings\n\ngRPC exposes a [variety of settings](https://godoc.org/google.golang.org/grpc).\nSeveral of these settings are available for configuration within individual\nreceivers or exporters. 
In general, none of these settings should need to be\nadjusted.\n\n## Client Configuration\n\n[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md)\nleverage client configuration.\n\nNote that client configuration supports TLS configuration, the\nconfiguration parameters are also defined under `tls` like server\nconfiguration. For more information, see [configtls\nREADME](../configtls/README.md).\n\n- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md)\n- `compression` Compression type to use among `gzip`, `snappy`, `zstd`, and `none`.\n- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md)\n- [`tls`](../configtls/README.md)\n- `headers`: name/value pairs added to the request\n- [`keepalive`](https://godoc.org/google.golang.org/grpc/keepalive#ClientParameters)\n - `permit_without_stream`\n - `time`\n - `timeout`\n- [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize)\n- [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize)\n\nPlease note that [`per_rpc_auth`](https://pkg.go.dev/google.golang.org/grpc#PerRPCCredentials) which allows the credentials to send for every RPC is now moved to become an [extension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/extension/bearertokenauthextension). Note that this feature isn't about sending the headers only during the initial connection as an `authorization` header under the `headers` would do: this is sent for every RPC performed during an established connection.\n\nExample:\n\n```yaml\nexporters:\n otlp:\n endpoint: otelcol2:55690\n tls:\n ca_file: ca.pem\n cert_file: cert.pem\n key_file: key.pem\n headers:\n test1: \"value1\"\n \"test 2\": \"value 2\"\n```\n\n### Compression Comparison\n\n[configgrpc_benchmark_test.go](./configgrpc_benchmark_test.go) contains benchmarks comparing the supported compression algorithms. It performs compression using `gzip`, `zstd`, and `snappy` compression on small, medium, and large sized log, trace, and metric payloads. Each test case outputs the uncompressed payload size, the compressed payload size, and the average nanoseconds spent on compression. \n\nThe following table summarizes the results, including some additional columns computed from the raw data. 
The benchmarks were performed on an AWS m5.large EC2 instance with an Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz.\n\n| Request | Compressor | Raw Bytes | Compressed bytes | Compression ratio | Ns / op | Mb compressed / second | Mb saved / second |\n|-------------------|------------|-----------|------------------|-------------------|---------|------------------------|-------------------|\n| lg_log_request | gzip | 5150 | 262 | 19.66 | 49231 | 104.61 | 99.29 |\n| lg_metric_request | gzip | 6800 | 201 | 33.83 | 51816 | 131.23 | 127.35 |\n| lg_trace_request | gzip | 9200 | 270 | 34.07 | 65174 | 141.16 | 137.02 |\n| md_log_request | gzip | 363 | 268 | 1.35 | 37609 | 9.65 | 2.53 |\n| md_metric_request | gzip | 320 | 145 | 2.21 | 30141 | 10.62 | 5.81 |\n| md_trace_request | gzip | 451 | 288 | 1.57 | 38270 | 11.78 | 4.26 |\n| sm_log_request | gzip | 166 | 168 | 0.99 | 30511 | 5.44 | -0.07 |\n| sm_metric_request | gzip | 185 | 142 | 1.30 | 29055 | 6.37 | 1.48 |\n| sm_trace_request | gzip | 233 | 205 | 1.14 | 33466 | 6.96 | 0.84 |\n| lg_log_request | snappy | 5150 | 475 | 10.84 | 1915 | 2,689.30 | 2,441.25 |\n| lg_metric_request | snappy | 6800 | 466 | 14.59 | 2266 | 3,000.88 | 2,795.23 |\n| lg_trace_request | snappy | 9200 | 644 | 14.29 | 3281 | 2,804.02 | 2,607.74 |\n| md_log_request | snappy | 363 | 300 | 1.21 | 770.0 | 471.43 | 81.82 |\n| md_metric_request | snappy | 320 | 162 | 1.98 | 588.6 | 543.66 | 268.43 |\n| md_trace_request | snappy | 451 | 330 | 1.37 | 907.7 | 496.86 | 133.30 |\n| sm_log_request | snappy | 166 | 184 | 0.90 | 551.8 | 300.83 | -32.62 |\n| sm_metric_request | snappy | 185 | 154 | 1.20 | 526.3 | 351.51 | 58.90 |\n| sm_trace_request | snappy | 233 | 251 | 0.93 | 682.1 | 341.59 | -26.39 |\n| lg_log_request | zstd | 5150 | 223 | 23.09 | 17998 | 286.14 | 273.75 |\n| lg_metric_request | zstd | 6800 | 144 | 47.22 | 14289 | 475.89 | 465.81 |\n| lg_trace_request | zstd | 9200 | 208 | 44.23 | 17160 | 536.13 | 524.01 |\n| md_log_request | zstd | 363 | 261 | 1.39 | 11216 | 32.36 | 9.09 |\n| md_metric_request | zstd | 320 | 145 | 2.21 | 9318 | 34.34 | 18.78 |\n| md_trace_request | zstd | 451 | 301 | 1.50 | 12583 | 35.84 | 11.92 |\n| sm_log_request | zstd | 166 | 165 | 1.01 | 12482 | 13.30 | 0.08 |\n| sm_metric_request | zstd | 185 | 139 | 1.33 | 8824 | 20.97 | 5.21 |\n| sm_trace_request | zstd | 233 | 203 | 1.15 | 10134 | 22.99 | 2.96 |\n\nCompression ratios will vary in practice as they are highly dependent on the data's information entropy. Compression rates are dependent on the speed of the CPU, and the size of payloads being compressed: smaller payloads compress at slower rates relative to larger payloads, which are able to amortize fixed computation costs over more bytes.\n\n`gzip` is the only required compression algorithm required for [OTLP servers](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#protocol-details), and is a natural first choice. It is not as fast as `snappy`, but achieves better compression ratios and has reasonable performance. If your collector is CPU bound and your OTLP server supports it, you may benefit from using `snappy` compression. If your collector is CPU bound and has a very fast network link, you may benefit from disabling compression, which is the default.\n\n## Server Configuration\n\n[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md)\nleverage server configuration.\n\nNote that transport configuration can also be configured. 
For more information,\nsee [confignet README](../confignet/README.md).\n\n- [`keepalive`](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters)\n - [`enforcement_policy`](https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy)\n - `min_time`\n - `permit_without_stream`\n - [`server_parameters`](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters)\n - `max_connection_age`\n - `max_connection_age_grace`\n - `max_connection_idle`\n - `time`\n - `timeout`\n- [`max_concurrent_streams`](https://godoc.org/google.golang.org/grpc#MaxConcurrentStreams)\n- [`max_recv_msg_size_mib`](https://godoc.org/google.golang.org/grpc#MaxRecvMsgSize)\n- [`read_buffer_size`](https://godoc.org/google.golang.org/grpc#ReadBufferSize)\n- [`tls`](../configtls/README.md)\n- [`write_buffer_size`](https://godoc.org/google.golang.org/grpc#WriteBufferSize)","properties":{"permit_without_stream":{"title":"permit_without_stream","type":"boolean"},"time":{"title":"time","type":"string"},"timeout":{"title":"timeout","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.config.configgrpc.KeepaliveEnforcementPolicy":{"additionalProperties":false,"description":"KeepaliveEnforcementPolicy allow configuration of the keepalive.EnforcementPolicy.","properties":{"min_time":{"title":"min_time","type":"string"},"permit_without_stream":{"title":"permit_without_stream","type":"boolean"}},"type":"object"},"go.opentelemetry.io.collector.config.configgrpc.KeepaliveServerConfig":{"additionalProperties":false,"description":"KeepaliveServerConfig is the configuration for keepalive.","properties":{"enforcement_policy":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveEnforcementPolicy","title":"enforcement_policy"},"server_parameters":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveServerParameters","title":"server_parameters"}},"type":"object"},"go.opentelemetry.io.collector.config.configgrpc.KeepaliveServerParameters":{"additionalProperties":false,"description":"KeepaliveServerParameters allow configuration of the keepalive.ServerParameters.","properties":{"max_connection_age":{"title":"max_connection_age","type":"string"},"max_connection_age_grace":{"title":"max_connection_age_grace","type":"string"},"max_connection_idle":{"title":"max_connection_idle","type":"string"},"time":{"title":"time","type":"string"},"timeout":{"title":"timeout","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.config.confighttp.CORSSettings":{"additionalProperties":false,"description":"CORSSettings configures a receiver for HTTP cross-origin resource sharing (CORS).","properties":{"allowed_headers":{"description":"AllowedHeaders sets what headers will be allowed in CORS requests.\nThe Accept, Accept-Language, Content-Type, and Content-Language\nheaders are implicitly allowed. If no headers are listed,\nX-Requested-With will also be accepted by default. Include \"*\" to\nallow any request header.","items":{"type":"string"},"title":"allowed_headers","type":"array"},"allowed_origins":{"description":"AllowedOrigins sets the allowed values of the Origin header for\nHTTP/JSON requests to an OTLP receiver. 
An origin may contain a\nwildcard (*) to replace 0 or more characters (e.g.,\n\"http://*.domain.com\", or \"*\" to allow any origin).","items":{"type":"string"},"title":"allowed_origins","type":"array"},"max_age":{"description":"MaxAge sets the value of the Access-Control-Max-Age response header.\nSet it to the number of seconds that browsers should cache a CORS\npreflight response for.","title":"max_age","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.config.confighttp.HTTPClientSettings":{"additionalProperties":false,"description":"HTTPClientSettings defines settings for creating an HTTP client.","markdownDescription":"# HTTP Configuration Settings\n\nHTTP exposes a [variety of settings](https://golang.org/pkg/net/http/).\nSeveral of these settings are available for configuration within individual\nreceivers or exporters.\n\n## Client Configuration\n\n[Exporters](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/README.md)\nleverage client configuration.\n\nNote that client configuration supports TLS configuration; the TLS\nparameters are defined under `tls`, just as in the server\nconfiguration. For more information, see [configtls\nREADME](../configtls/README.md).\n\n- `endpoint`: address:port\n- [`tls`](../configtls/README.md)\n- `headers`: name/value pairs added to the HTTP request headers\n- [`read_buffer_size`](https://golang.org/pkg/net/http/#Transport)\n- [`timeout`](https://golang.org/pkg/net/http/#Client)\n- [`write_buffer_size`](https://golang.org/pkg/net/http/#Transport)\n- `compression`: Compression type to use among `gzip`, `zstd`, `snappy`, `zlib`, and `deflate`.\n - check the documentation for the server side of the communication to confirm which compression types it supports.\n - `none` will be treated as uncompressed, and any other inputs will cause an error.\n- [`max_idle_conns`](https://golang.org/pkg/net/http/#Transport)\n- [`max_idle_conns_per_host`](https://golang.org/pkg/net/http/#Transport)\n- [`max_conns_per_host`](https://golang.org/pkg/net/http/#Transport)\n- [`idle_conn_timeout`](https://golang.org/pkg/net/http/#Transport)\n\nExample:\n\n```yaml\nexporters:\n otlp:\n endpoint: otelcol2:55690\n tls:\n ca_file: ca.pem\n cert_file: cert.pem\n key_file: key.pem\n headers:\n test1: \"value1\"\n \"test 2\": \"value 2\"\n compression: zstd\n```\n\n## Server Configuration\n\n[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md)\nleverage server configuration.\n\n- [`cors`](https://github.com/rs/cors#parameters): Configure [CORS][cors],\nallowing the receiver to accept traces from web browsers, even if the receiver\nis hosted at a different [origin][origin]. If left blank or set to `null`, CORS\nwill not be enabled.\n - `allowed_origins`: A list of [origins][origin] allowed to send requests to\n the receiver. An origin may contain a wildcard (`*`) to replace 0 or more\n characters (e.g., `https://*.example.com`). To allow any origin, set to\n `[\"*\"]`. If no origins are listed, CORS will not be enabled.\n - `allowed_headers`: Allow CORS requests to include headers outside the\n [default safelist][cors-headers]. By default, safelist headers and\n `X-Requested-With` will be allowed. To allow any request header, set to\n `[\"*\"]`.\n - `max_age`: Sets the value of the [`Access-Control-Max-Age`][cors-cache]\n header, allowing clients to cache the response to CORS preflight requests. 
If\n not set, browsers use a default of 5 seconds.\n- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md)\n- [`tls`](../configtls/README.md)\n\nYou can use the [`attribute processor`][attribute-processor] to append any HTTP header to a span's attributes under a custom key. To do this, you also need to enable `include_metadata` on the receiver, as shown in the example below.\n\nExample:\n\n```yaml\nreceivers:\n otlp:\n protocols:\n http:\n include_metadata: true\n cors:\n allowed_origins:\n - https://foo.bar.com\n - https://*.test.com\n allowed_headers:\n - Example-Header\n max_age: 7200\n endpoint: 0.0.0.0:55690\nprocessors:\n attributes:\n actions:\n - key: http.client_ip\n from_context: X-Forwarded-For\n action: upsert\n```\n\n[cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS\n[cors-headers]: https://developer.mozilla.org/en-US/docs/Glossary/CORS-safelisted_request_header\n[cors-cache]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age\n[origin]: https://developer.mozilla.org/en-US/docs/Glossary/Origin\n[attribute-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/attributesprocessor/README.md","properties":{"CustomRoundTripper":{"description":"Custom Round Tripper to allow for individual components to intercept HTTP requests","title":"CustomRoundTripper"},"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing HTTP calls.","title":"auth"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target URL to send data to (e.g.: http://some.url:9411/v1/traces).","title":"endpoint","type":"string"},"headers":{"description":"Additional headers attached to each HTTP request sent by the client.\nExisting header values are overwritten if collision happens.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"idle_conn_timeout":{"description":"IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"idle_conn_timeout","type":"string"},"max_conns_per_host":{"description":"MaxConnsPerHost limits the total number of connections per host, including connections in the dialing,\nactive, and idle states.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_conns_per_host","type":"integer"},"max_idle_conns":{"description":"MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns","type":"integer"},"max_idle_conns_per_host":{"description":"MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open.\nThere's an already set value, and we want to override it only if an explicit value provided","title":"max_idle_conns_per_host","type":"integer"},"read_buffer_size":{"description":"ReadBufferSize for HTTP client. 
See http.Transport.ReadBufferSize.","title":"read_buffer_size","type":"integer"},"timeout":{"description":"Timeout parameter configures `http.Client.Timeout`.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"write_buffer_size":{"description":"WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize.","title":"write_buffer_size","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings":{"additionalProperties":false,"description":"HTTPServerSettings defines settings for creating an HTTP server.","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth for this receiver","title":"auth"},"cors":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.CORSSettings","description":"CORS configures the server for HTTP cross-origin resource sharing (CORS).","title":"cors"},"endpoint":{"description":"Endpoint configures the listening address for the server.","title":"endpoint","type":"string"},"include_metadata":{"description":"IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers\nExperimental: *NOTE* this option is subject to change or removal in the future.","title":"include_metadata","type":"boolean"},"max_request_body_size":{"description":"MaxRequestBodySize sets the maximum request body size in bytes","title":"max_request_body_size","type":"integer"},"response_headers":{"description":"Additional headers attached to each HTTP response sent to the client.\nHeader values are opaque since they may be sensitive.","patternProperties":{".*":{"type":"string"}},"title":"response_headers","type":"object"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSServerSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"}},"type":"object"},"go.opentelemetry.io.collector.config.confignet.NetAddr":{"additionalProperties":false,"description":"NetAddr represents a network endpoint address.","markdownDescription":"# Network Configuration Settings\n\n[Receivers](https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/README.md)\nleverage network configuration to set connection and transport information.\n\n- `endpoint`: Configures the address for this network connection. For TCP and\n UDP networks, the address has the form \"host:port\". The host must be a\n literal IP address, or a host name that can be resolved to IP addresses. The\n port must be a literal port number or a service name. If the host is a\n literal IPv6 address it must be enclosed in square brackets, as in\n \"[2001:db8::1]:80\" or \"[fe80::1%zone]:80\". The zone specifies the scope of\n the literal IPv6 address as defined in RFC 4007.\n- `transport`: Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\"\n (IPv6-only), \"udp\", \"udp4\" (IPv4-only), \"udp6\" (IPv6-only), \"ip\", \"ip4\"\n (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".\n\nNote that for TCP receivers only the `endpoint` configuration setting is\nrequired.","properties":{"endpoint":{"description":"Endpoint configures the address for this network connection.\nFor TCP and UDP networks, the address has the form \"host:port\". The host must be a literal IP address,\nor a host name that can be resolved to IP addresses. 
The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"},"transport":{"description":"Transport to use. Known protocols are \"tcp\", \"tcp4\" (IPv4-only), \"tcp6\" (IPv6-only), \"udp\", \"udp4\" (IPv4-only),\n\"udp6\" (IPv6-only), \"ip\", \"ip4\" (IPv4-only), \"ip6\" (IPv6-only), \"unix\", \"unixgram\" and \"unixpacket\".","title":"transport","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.config.configtls.TLSClientSetting":{"additionalProperties":false,"description":"TLSClientSetting contains TLS configurations that are specific to client connections in addition to the common configurations.","properties":{"ca_file":{"description":"Path to the CA cert. For a client this verifies the server certificate.\nFor a server this verifies client certificates. If empty uses system root CA.\n(optional)","title":"ca_file","type":"string"},"ca_pem":{"description":"In memory PEM encoded cert. (optional)","title":"ca_pem","type":"string"},"cert_file":{"description":"Path to the TLS cert to use for TLS required connections. (optional)","title":"cert_file","type":"string"},"cert_pem":{"description":"In memory PEM encoded TLS cert to use for TLS required connections. (optional)","title":"cert_pem","type":"string"},"insecure":{"description":"In gRPC when set to true, this is used to disable the client transport security.\nSee https://godoc.org/google.golang.org/grpc#WithInsecure.\nIn HTTP, this disables verifying the server's certificate chain and host name\n(InsecureSkipVerify in the tls Config). Please refer to\nhttps://godoc.org/crypto/tls#Config for more information.\n(optional, default false)","title":"insecure","type":"boolean"},"insecure_skip_verify":{"description":"InsecureSkipVerify will enable TLS but not verify the certificate.","title":"insecure_skip_verify","type":"boolean"},"key_file":{"description":"Path to the TLS key to use for TLS required connections. (optional)","title":"key_file","type":"string"},"key_pem":{"description":"In memory PEM encoded TLS key to use for TLS required connections. (optional)","title":"key_pem","type":"string"},"max_version":{"description":"MaxVersion sets the maximum TLS version that is acceptable.\nIf not set, refer to crypto/tls for defaults. (optional)","title":"max_version","type":"string"},"min_version":{"description":"MinVersion sets the minimum TLS version that is acceptable.\nIf not set, TLS 1.2 will be used. (optional)","title":"min_version","type":"string"},"reload_interval":{"description":"ReloadInterval specifies the duration after which the certificate will be reloaded\nIf not set, it will never be reloaded (optional)","title":"reload_interval","type":"string"},"server_name_override":{"description":"ServerName requested by client for virtual hosting.\nThis sets the ServerName in the TLSConfig. Please refer to\nhttps://godoc.org/crypto/tls#Config for more information. (optional)","title":"server_name_override","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.config.configtls.TLSServerSetting":{"additionalProperties":false,"description":"TLSServerSetting contains TLS configurations that are specific to server connections in addition to the common configurations.","properties":{"ca_file":{"description":"Path to the CA cert. 
For a client this verifies the server certificate.\nFor a server this verifies client certificates. If empty uses system root CA.\n(optional)","title":"ca_file","type":"string"},"ca_pem":{"description":"In memory PEM encoded cert. (optional)","title":"ca_pem","type":"string"},"cert_file":{"description":"Path to the TLS cert to use for TLS required connections. (optional)","title":"cert_file","type":"string"},"cert_pem":{"description":"In memory PEM encoded TLS cert to use for TLS required connections. (optional)","title":"cert_pem","type":"string"},"client_ca_file":{"description":"Path to the TLS cert to use by the server to verify a client certificate. (optional)\nThis sets the ClientCAs and ClientAuth to RequireAndVerifyClientCert in the TLSConfig. Please refer to\nhttps://godoc.org/crypto/tls#Config for more information. (optional)","title":"client_ca_file","type":"string"},"client_ca_file_reload":{"description":"Reload the ClientCAs file when it is modified\n(optional, default false)","title":"client_ca_file_reload","type":"boolean"},"key_file":{"description":"Path to the TLS key to use for TLS required connections. (optional)","title":"key_file","type":"string"},"key_pem":{"description":"In memory PEM encoded TLS key to use for TLS required connections. (optional)","title":"key_pem","type":"string"},"max_version":{"description":"MaxVersion sets the maximum TLS version that is acceptable.\nIf not set, refer to crypto/tls for defaults. (optional)","title":"max_version","type":"string"},"min_version":{"description":"MinVersion sets the minimum TLS version that is acceptable.\nIf not set, TLS 1.2 will be used. (optional)","title":"min_version","type":"string"},"reload_interval":{"description":"ReloadInterval specifies the duration after which the certificate will be reloaded\nIf not set, it will never be reloaded (optional)","title":"reload_interval","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings":{"additionalProperties":false,"description":"QueueSettings defines configuration for queueing batches before sending to the consumerSender.","markdownDescription":"# Exporter Helper\n\nThis is a helper exporter that other exporters can depend on. 
Today, it primarily offers queued retry capabilities.\n\n\u003e :warning: This exporter should not be added to a service pipeline.\n\n## Configuration\n\nThe following configuration options can be modified:\n\n- `retry_on_failure`\n - `enabled` (default = true)\n - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`\n - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 300s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`\n- `sending_queue`\n - `enabled` (default = true)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping; ignored if `enabled` is `false`\n User should calculate this as `num_seconds * requests_per_second / requests_per_batch` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per seconds\n - `requests_per_batch` is the average number of requests per batch (if \n [the batch processor](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor)\n is used, the metric `batch_send_size` can be used for estimation)\n- `timeout` (default = 5s): Time to wait per individual attempt to send data to a backend\n\nThe `initial_interval`, `max_interval`, `max_elapsed_time`, and `timeout` options accept \n[duration strings](https://pkg.go.dev/time#ParseDuration),\nvalid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".\n\n### Persistent Queue\n\n**Status: [alpha]**\n\n\u003e :warning: The capability is under development. To use it, a storage extension needs to be set up.\n\nTo use the persistent queue, the following setting needs to be set:\n\n- `sending_queue`\n - `storage` (default = none): When set, enables persistence and uses the component specified as a storage extension for the persistent queue\n\nThe maximum number of batches stored to disk can be controlled using `sending_queue.queue_size` parameter (which,\nsimilarly as for in-memory buffering, defaults to 1000 batches).\n\nWhen persistent queue is enabled, the batches are being buffered using the provided storage extension - [filestorage] is a popular and safe choice. If the collector instance is killed while having some items in the persistent queue, on restart the items will be be picked and the exporting is continued.\n\n```\n ┌─Consumer #1─┐\n │ ┌───┐ │\n ──────Deleted────── ┌───►│ │ 1 │ ├───► Success\n Waiting in channel x x x │ │ └───┘ │\n for consumer ───┐ x x x │ │ │\n │ x x x │ └─────────────┘\n ▼ x x x │\n┌─────────────────────────────────────────x─────x───┐ │ ┌─Consumer #2─┐\n│ x x x │ │ │ ┌───┐ │\n│ ┌───┐ ┌───┐ ┌───┐ ┌─x─┐ ┌───┐ ┌─x─┐ ┌─x─┐ │ │ │ │ 2 │ ├───► Permanent -\u003e X\n│ n+1 │ n │ ... 
│ 6 │ │ 5 │ │ 4 │ │ 3 │ │ 2 │ │ 1 │ ├────┼───►│ └───┘ │ failure\n│ └───┘ └───┘ └───┘ └───┘ └───┘ └───┘ └───┘ │ │ │ │\n│ │ │ └─────────────┘\n└───────────────────────────────────────────────────┘ │\n ▲ ▲ ▲ ▲ │ ┌─Consumer #3─┐\n │ │ │ │ │ │ ┌───┐ │\n │ │ │ │ │ │ │ 3 │ ├───► (in progress)\n write read └─────┬─────┘ ├───►│ └───┘ │\n index index │ │ │ │\n ▲ │ │ └─────────────┘\n │ │ │\n │ currently │ ┌─Consumer #4─┐\n │ dispatched │ │ ┌───┐ │ Temporary\n │ └───►│ │ 4 │ ├───► failure\n │ │ └───┘ │ │\n │ │ │ │\n │ └─────────────┘ │\n │ ▲ │\n │ └── Retry ───────┤\n │ │\n │ │\n └────────────────────────────────────── Requeuing ◄────── Retry limit exceeded ───┘\n```\n\nExample:\n\n```\nreceivers:\n otlp:\n protocols:\n grpc:\nexporters:\n otlp:\n endpoint: \u003cENDPOINT\u003e\n sending_queue:\n storage: file_storage/otc\nextensions:\n file_storage/otc:\n directory: /var/lib/storage/otc\n timeout: 10s\nservice:\n extensions: [file_storage]\n pipelines:\n metrics:\n receivers: [otlp]\n exporters: [otlp]\n logs:\n receivers: [otlp]\n exporters: [otlp]\n traces:\n receivers: [otlp]\n exporters: [otlp]\n\n```\n\n[filestorage]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha","properties":{"enabled":{"description":"Enabled indicates whether to not enqueue batches before sending to the consumerSender.","title":"enabled","type":"boolean"},"num_consumers":{"description":"NumConsumers is the number of consumers from the queue.","title":"num_consumers","type":"integer"},"queue_size":{"description":"QueueSize is the maximum number of batches allowed in queue at a given time.","title":"queue_size","type":"integer"},"storage":{"description":"StorageID if not empty, enables the persistent storage and uses the component specified\nas a storage extension for the persistent queue","title":"storage","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings":{"additionalProperties":false,"description":"RetrySettings defines configuration for retrying batches in case of export failure.","properties":{"enabled":{"description":"Enabled indicates whether to not retry sending batches in case of export failure.","title":"enabled","type":"boolean"},"initial_interval":{"description":"InitialInterval the time to wait after the first failure before retrying.","title":"initial_interval","type":"string"},"max_elapsed_time":{"description":"MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.\nOnce this value is reached, the data is discarded.","title":"max_elapsed_time","type":"string"},"max_interval":{"description":"MaxInterval is the upper bound on backoff interval. 
Once this value is reached the delay between\nconsecutive retries will always be `MaxInterval`.","title":"max_interval","type":"string"},"multiplier":{"description":"Multiplier is the value multiplied by the backoff interval bounds","title":"multiplier","type":"number"},"randomization_factor":{"description":"RandomizationFactor is a random factor used to calculate next backoffs\nRandomized interval = RetryInterval * (1 ± RandomizationFactor)","title":"randomization_factor","type":"number"}},"type":"object"},"go.opentelemetry.io.collector.exporter.exporterhelper.TimeoutSettings":{"additionalProperties":false,"description":"TimeoutSettings for timeout.","markdownDescription":"# Exporter Helper\n\nThis is a helper exporter that other exporters can depend on. Today, it primarily offers queued retry capabilities.\n\n\u003e :warning: This exporter should not be added to a service pipeline.\n\n## Configuration\n\nThe following configuration options can be modified:\n\n- `retry_on_failure`\n - `enabled` (default = true)\n - `initial_interval` (default = 5s): Time to wait after the first failure before retrying; ignored if `enabled` is `false`\n - `max_interval` (default = 30s): Is the upper bound on backoff; ignored if `enabled` is `false`\n - `max_elapsed_time` (default = 300s): Is the maximum amount of time spent trying to send a batch; ignored if `enabled` is `false`\n- `sending_queue`\n - `enabled` (default = true)\n - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`\n - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping; ignored if `enabled` is `false`\n User should calculate this as `num_seconds * requests_per_second / requests_per_batch` where:\n - `num_seconds` is the number of seconds to buffer in case of a backend outage\n - `requests_per_second` is the average number of requests per seconds\n - `requests_per_batch` is the average number of requests per batch (if \n [the batch processor](https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor)\n is used, the metric `batch_send_size` can be used for estimation)\n- `timeout` (default = 5s): Time to wait per individual attempt to send data to a backend\n\nThe `initial_interval`, `max_interval`, `max_elapsed_time`, and `timeout` options accept \n[duration strings](https://pkg.go.dev/time#ParseDuration),\nvalid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".\n\n### Persistent Queue\n\n**Status: [alpha]**\n\n\u003e :warning: The capability is under development. To use it, a storage extension needs to be set up.\n\nTo use the persistent queue, the following setting needs to be set:\n\n- `sending_queue`\n - `storage` (default = none): When set, enables persistence and uses the component specified as a storage extension for the persistent queue\n\nThe maximum number of batches stored to disk can be controlled using `sending_queue.queue_size` parameter (which,\nsimilarly as for in-memory buffering, defaults to 1000 batches).\n\nWhen persistent queue is enabled, the batches are being buffered using the provided storage extension - [filestorage] is a popular and safe choice. 
If the collector instance is killed while having some items in the persistent queue, on restart the items will be be picked and the exporting is continued.\n\n```\n ┌─Consumer #1─┐\n │ ┌───┐ │\n ──────Deleted────── ┌───►│ │ 1 │ ├───► Success\n Waiting in channel x x x │ │ └───┘ │\n for consumer ───┐ x x x │ │ │\n │ x x x │ └─────────────┘\n ▼ x x x │\n┌─────────────────────────────────────────x─────x───┐ │ ┌─Consumer #2─┐\n│ x x x │ │ │ ┌───┐ │\n│ ┌───┐ ┌───┐ ┌───┐ ┌─x─┐ ┌───┐ ┌─x─┐ ┌─x─┐ │ │ │ │ 2 │ ├───► Permanent -\u003e X\n│ n+1 │ n │ ... │ 6 │ │ 5 │ │ 4 │ │ 3 │ │ 2 │ │ 1 │ ├────┼───►│ └───┘ │ failure\n│ └───┘ └───┘ └───┘ └───┘ └───┘ └───┘ └───┘ │ │ │ │\n│ │ │ └─────────────┘\n└───────────────────────────────────────────────────┘ │\n ▲ ▲ ▲ ▲ │ ┌─Consumer #3─┐\n │ │ │ │ │ │ ┌───┐ │\n │ │ │ │ │ │ │ 3 │ ├───► (in progress)\n write read └─────┬─────┘ ├───►│ └───┘ │\n index index │ │ │ │\n ▲ │ │ └─────────────┘\n │ │ │\n │ currently │ ┌─Consumer #4─┐\n │ dispatched │ │ ┌───┐ │ Temporary\n │ └───►│ │ 4 │ ├───► failure\n │ │ └───┘ │ │\n │ │ │ │\n │ └─────────────┘ │\n │ ▲ │\n │ └── Retry ───────┤\n │ │\n │ │\n └────────────────────────────────────── Requeuing ◄────── Retry limit exceeded ───┘\n```\n\nExample:\n\n```\nreceivers:\n otlp:\n protocols:\n grpc:\nexporters:\n otlp:\n endpoint: \u003cENDPOINT\u003e\n sending_queue:\n storage: file_storage/otc\nextensions:\n file_storage/otc:\n directory: /var/lib/storage/otc\n timeout: 10s\nservice:\n extensions: [file_storage]\n pipelines:\n metrics:\n receivers: [otlp]\n exporters: [otlp]\n logs:\n receivers: [otlp]\n exporters: [otlp]\n traces:\n receivers: [otlp]\n exporters: [otlp]\n\n```\n\n[filestorage]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage\n[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha","properties":{"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.exporter.otlpexporter.Config":{"additionalProperties":false,"description":"Config defines configuration for OTLP exporter.","markdownDescription":"# OTLP gRPC Exporter\n\n| Status | |\n| ------------------------ | --------------------- |\n| Stability | traces [stable] |\n| | metrics [stable] |\n| | logs [beta] |\n| Supported pipeline types | traces, metrics, logs |\n| Distributions | [core], [contrib] |\n\nExport data via gRPC using [OTLP](\nhttps://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md)\nformat. By default, this exporter requires TLS and offers queued retry capabilities.\n\n## Getting Started\n\nThe following settings are required:\n\n- `endpoint` (no default): host:port to which the exporter is going to send OTLP trace data,\nusing the gRPC protocol. The valid syntax is described\n[here](https://github.com/grpc/grpc/blob/master/doc/naming.md).\nIf a scheme of `https` is used then client transport security is enabled and overrides the `insecure` setting.\n- `tls`: see [TLS Configuration Settings](../../config/configtls/README.md) for the full set of available options.\n\nExample:\n\n```yaml\nexporters:\n otlp:\n endpoint: otelcol2:4317\n tls:\n cert_file: file.cert\n key_file: file.key\n otlp/2:\n endpoint: otelcol2:4317\n tls:\n insecure: true\n```\n\nBy default, `gzip` compression is enabled. See [compression comparison](../../config/configgrpc/README.md#compression-comparison) for details benchmark information. 
To disable, configure as follows:\n\n```yaml\nexporters:\n otlp:\n ...\n compression: none\n```\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md)\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md)\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable","properties":{"auth":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configauth.Authentication","description":"Auth configuration for outgoing RPCs.","title":"auth"},"balancer_name":{"description":"Sets the balancer in grpclb_policy to discover the servers. Default is pick_first.\nhttps://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md","title":"balancer_name","type":"string"},"compression":{"description":"The compression key for supported compression types within collector.","title":"compression","type":"string"},"endpoint":{"description":"The target to which the exporter is going to send traces or metrics,\nusing the gRPC protocol. The valid syntax is described at\nhttps://github.com/grpc/grpc/blob/master/doc/naming.md.","title":"endpoint","type":"string"},"headers":{"description":"The headers associated with gRPC requests.","patternProperties":{".*":{"type":"string"}},"title":"headers","type":"object"},"keepalive":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.KeepaliveClientConfig","description":"The keepalive parameters for gRPC client. See grpc.WithKeepaliveParams.\n(https://godoc.org/google.golang.org/grpc#WithKeepaliveParams).","title":"keepalive"},"read_buffer_size":{"description":"ReadBufferSize for gRPC client. See grpc.WithReadBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithReadBufferSize).","title":"read_buffer_size","type":"integer"},"retry_on_failure":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.RetrySettings","title":"retry_on_failure"},"sending_queue":{"$ref":"#/$defs/go.opentelemetry.io.collector.exporter.exporterhelper.QueueSettings","title":"sending_queue"},"timeout":{"description":"Timeout is the timeout for every attempt to send data to the backend.","title":"timeout","type":"string"},"tls":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configtls.TLSClientSetting","description":"TLSSetting struct exposes TLS client configuration.","title":"tls"},"wait_for_ready":{"description":"WaitForReady parameter configures client to wait for ready state before sending data.\n(https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md)","title":"wait_for_ready","type":"boolean"},"write_buffer_size":{"description":"WriteBufferSize for gRPC gRPC. 
See grpc.WithWriteBufferSize.\n(https://godoc.org/google.golang.org/grpc#WithWriteBufferSize).","title":"write_buffer_size","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.extension.ballastextension.Config":{"additionalProperties":false,"description":"Config has the configuration for the ballast extension.","markdownDescription":"# Memory Ballast\n\n| Status | |\n| ------------------------ | ----------------- |\n| Stability | [beta] |\n| Distributions | [core], [contrib] |\n\nMemory Ballast extension enables applications to configure memory ballast for the process. For more details see:\n- [Go memory ballast blogpost](https://web.archive.org/web/20210929130001/https://blog.twitch.tv/en/2019/04/10/go-memory-ballast-how-i-learnt-to-stop-worrying-and-love-the-heap-26c2462549a2/)\n- [Golang issue related to this](https://github.com/golang/go/issues/23044)\n\nThe following settings can be configured:\n\n- `size_mib` (default = 0, disabled): Is the memory ballast size, in MiB. \n Takes higher priority than `size_in_percentage` if both are specified at the same time.\n- `size_in_percentage` (default = 0, disabled): Set the memory ballast based on the \n total memory in percentage, value range is `1-100`. \n It is supported in both containerized(eg, docker, k8s) and physical host environments.\n \n**How ballast size is calculated with percentage configuration**\nWhen `size_in_percentage` is enabled with the value(1-100), the absolute `ballast_size` will be calculated by\n`size_in_percentage * totalMemory / 100`. The `totalMemory` can be retrieved for hosts and containers(in docker, k8s, etc) by the following steps,\n1. Look up Memory Cgroup subsystem on the target host or container, find out if there is any total memory limitation has been set for the running collector process.\n Check the value in `memory.limit_in_bytes` file under cgroup memory files (eg, `/sys/fs/cgroup/memory/memory.limit_in_bytes`).\n\n2. If `memory.limit_in_bytes` is positive value other than `9223372036854771712`(`0x7FFFFFFFFFFFF000`). The `ballast_size`\n will be calculated by `memory.limit_in_bytes * size_in_percentage / 100`.\n If `memory.limit_in_bytes` value is `9223372036854771712`(`0x7FFFFFFFFFFFF000`), it indicates there is no memory limit has\n been set for the collector process or the running container in cgroup. Then the `totalMemory` will be determined in next step.\n \n3. if there is no memory limit set in cgroup for the collector process or container where the collector is running. The total memory will be\n calculated by `github.com/shirou/gopsutil/v3/mem`[[link]](https://github.com/shirou/gopsutil/) on `mem.VirtualMemory().total` which is supported in multiple OS systems.\n\n\nExample:\nConfig that uses 64 Mib of memory for the ballast:\n```yaml\nextensions:\n memory_ballast:\n size_mib: 64\n```\n\nConfig that uses 20% of the total memory for the ballast:\n```yaml\nextensions:\n memory_ballast:\n size_in_percentage: 20\n```\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector-contrib#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol","properties":{"size_in_percentage":{"description":"SizeInPercentage is the maximum amount of memory ballast, in %, targeted to be\nallocated. 
The fixed memory settings SizeMiB has a higher precedence.","title":"size_in_percentage","type":"integer"},"size_mib":{"description":"SizeMiB is the size, in MiB, of the memory ballast\nto be created for this process.","title":"size_mib","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.extension.zpagesextension.Config":{"additionalProperties":false,"description":"Config has the configuration for the extension enabling the zPages extension.","markdownDescription":"# zPages\n\n| Status | |\n| ------------------------ | ----------------- |\n| Stability | [beta] |\n| Distributions | [core], [contrib] |\n\nEnables an extension that serves zPages, an HTTP endpoint that provides live\ndata for debugging different components that were properly instrumented for such.\nAll core exporters and receivers provide some zPage instrumentation.\n\nzPages are useful for in-process diagnostics without having to depend on any\nbackend to examine traces or metrics. \n\nThe following settings are required:\n\n- `endpoint` (default = localhost:55679): Specifies the HTTP endpoint that serves\nzPages. Use localhost:\u003cport\u003e to make it available only locally, or \":\u003cport\u003e\" to\nmake it available on all network interfaces.\n\nExample:\n```yaml\nextensions:\n zpages:\n```\n\nThe full list of settings exposed for this exporter are documented [here](./config.go)\nwith detailed sample configurations [here](./testdata/config.yaml).\n\n## Exposed zPages routes\n\nThe collector exposes the following zPage routes:\n\n### ServiceZ\n\nServiceZ gives an overview of the collector services and quick access to the\n`pipelinez`, `extensionz`, and `featurez` zPages. The page also provides build \nand runtime information.\n\nExample URL: http://localhost:55679/debug/servicez\n\n### PipelineZ\n\nPipelineZ brings insight on the running pipelines running in the collector. You can\nfind information on type, if data is mutated and the receivers, processors and exporters\nthat are used for each pipeline.\n\nExample URL: http://localhost:55679/debug/pipelinez\n\n### ExtensionZ\n\nExtensionZ shows the extensions that are active in the collector.\n\nExample URL: http://localhost:55679/debug/extensionz\n\n### FeatureZ\n\nFeatureZ lists the feature gates available along with their current status \nand description.\n\nExample URL: http://localhost:55679/debug/featurez\n\n### TraceZ\nThe TraceZ route is available to examine and bucketize spans by latency buckets for \nexample\n\n(0us, 10us, 100us, 1ms, 10ms, 100ms, 1s, 10s, 1m]\nThey also allow you to quickly examine error samples\n\nExample URL: http://localhost:55679/debug/tracez\n\n### RpcZ\nThe Rpcz route is available to help examine statistics of remote procedure calls (RPCs) \nthat are properly instrumented. For example when using gRPC\n\nExample URL: http://localhost:55679/debug/rpcz\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector-contrib#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol","properties":{"endpoint":{"description":"Endpoint configures the address for this network connection.\nThe address has the form \"host:port\". The host must be a literal IP address, or a host name that can be\nresolved to IP addresses. 
The port must be a literal port number or a service name.\nIf the host is a literal IPv6 address it must be enclosed in square brackets, as in \"[2001:db8::1]:80\" or\n\"[fe80::1%zone]:80\". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007.","title":"endpoint","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.processor.batchprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for batch processor.","markdownDescription":"# Batch Processor\n\n| Status | |\n| ------------------------ | --------------------- |\n| Stability | traces [beta] |\n| | metrics [beta] |\n| | logs [beta] |\n| Supported pipeline types | traces, metrics, logs |\n| Distributions | [core], [contrib] |\n\nThe batch processor accepts spans, metrics, or logs and places them into\nbatches. Batching helps better compress the data and reduce the number of\noutgoing connections required to transmit the data. This processor supports\nboth size and time based batching.\n\nIt is highly recommended to configure the batch processor on every collector.\nThe batch processor should be defined in the pipeline after the `memory_limiter`\nas well as any sampling processors. This is because batching should happen after\nany data drops such as sampling.\n\nPlease refer to [config.go](./config.go) for the config spec.\n\nThe following configuration options can be modified:\n- `send_batch_size` (default = 8192): Number of spans, metric data points, or log\nrecords after which a batch will be sent regardless of the timeout. `send_batch_size`\nacts as a trigger and does not affect the size of the batch. If you need to\nenforce batch size limits sent to the next component in the pipeline\nsee `send_batch_max_size`.\n- `timeout` (default = 200ms): Time duration after which a batch will\nbe sent regardless of size. If set to zero, `send_batch_size` is\nignored as data will be sent immediately, subject to only `send_batch_max_size`.\n- `send_batch_max_size` (default = 0): The upper limit of the batch size.\n `0` means no upper limit of the batch size.\n This property ensures that larger batches are split into smaller units.\n It must be greater than or equal to `send_batch_size`.\n- `metadata_keys` (default = empty): When set, this processor will\n create one batcher instance per distinct combination of values in\n the `client.Metadata`.\n- `metadata_cardinality_limit` (default = 1000): When `metadata_keys` is \n not empty, this setting limits the number of unique combinations of \n metadata key values that will be processed over the lifetime of the\n process.\n\nSee notes about metadata batching below.\n\nExamples:\n\nThis configuration contains one default batch processor and a second\nwith custom settings. 
The `batch/2` processor will buffer up to 10000\nspans, metric data points, or log records for up to 10 seconds without\nsplitting data items to enforce a maximum batch size.\n\n```yaml\nprocessors:\n batch:\n batch/2:\n send_batch_size: 10000\n timeout: 10s\n```\n\nThis configuration will enforce a maximum batch size limit of 10000\nspans, metric data points, or log records without introducing any\nartificial delays.\n\n```yaml\nprocessors:\n batch:\n send_batch_max_size: 10000\n timeout: 0s\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed\nexamples on using the processor.\n\n## Batching and client metadata\n\nBatching by metadata enables support for multi-tenant OpenTelemetry\nCollector pipelines with batching over groups of data having the same\nauthorization metadata. For example:\n\n```yaml\nprocessors:\n batch:\n # batch data by tenant-id\n metadata_keys:\n - tenant_id\n\n # limit to 10 batcher processes before raising errors\n metadata_cardinality_limit: 10\n```\n\nReceivers should be configured with `include_metadata: true` so that\nmetadata keys are available to the processor.\n\nNote that each distinct combination of metadata triggers the\nallocation of a new background task in the Collector that runs for the\nlifetime of the process, and each background task holds one pending\nbatch of up to `send_batch_size` records. Batching by metadata can\ntherefore substantially increase the amount of memory dedicated to\nbatching.\n\nThe maximum number of distinct combinations is limited to the\nconfigured `metadata_cardinality_limit`, which defaults to 1000 to\nlimit memory impact.\n\nUsers of the batching processor configured with metadata keys should\nconsider use of an Auth extension to validate the relevant\nmetadata-key values.\n\nThe number of batch processors currently in use is exported as the\n`otelcol_processor_batch_metadata_cardinality` metric.\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol","properties":{"metadata_cardinality_limit":{"description":"MetadataCardinalityLimit indicates the maximum number of\nbatcher instances that will be created through a distinct\ncombination of MetadataKeys.","title":"metadata_cardinality_limit","type":"integer"},"metadata_keys":{"description":"MetadataKeys is a list of client.Metadata keys that will be\nused to form distinct batchers. If this setting is empty,\na single batcher instance will be used. When this setting\nis not empty, one batcher will be used per distinct\ncombination of values for the listed metadata keys.\n\nEmpty value and unset metadata are treated as distinct cases.\n\nEntries are case-insensitive. Duplicated entries will\ntrigger a validation error.","items":{"type":"string"},"title":"metadata_keys","type":"array"},"send_batch_max_size":{"description":"SendBatchMaxSize is the maximum size of a batch. 
It must be larger than SendBatchSize.\nLarger batches are split into smaller units.\nDefault value is 0, which means no maximum size.","title":"send_batch_max_size","type":"integer"},"send_batch_size":{"description":"SendBatchSize is the size of a batch which, once reached, will trigger the batch to be sent.\nWhen this is set to zero, the batch size is ignored and data will be sent immediately\nsubject to only send_batch_max_size.","title":"send_batch_size","type":"integer"},"timeout":{"description":"Timeout sets the time after which a batch will be sent regardless of size.\nWhen this is set to zero, batched data will be sent immediately.","title":"timeout","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.processor.memorylimiterprocessor.Config":{"additionalProperties":false,"description":"Config defines configuration for the memoryLimiter processor.","markdownDescription":"# Memory Limiter Processor\n\n| Status | |\n| ------------------------ | --------------------- |\n| Stability | [beta] |\n| Supported pipeline types | traces, metrics, logs |\n| Distributions | [core], [contrib] |\n\nThe memory limiter processor is used to prevent out-of-memory situations on\nthe collector. Given that the amount and type of data the collector processes is\nenvironment specific and resource utilization of the collector is also dependent\non the configured processors, it is important to put checks in place regarding\nmemory usage.\n \nThe memory_limiter processor performs periodic checks of memory\nusage and, if it exceeds the defined limits, begins refusing data and forcing GC to reduce\nmemory consumption.\n\nThe memory_limiter uses soft and hard memory limits. The hard limit is always above or equal\nto the soft limit.\n\nWhen the memory usage exceeds the soft limit, the processor will enter memory-limited\nmode and will start refusing data by returning errors to the preceding component\nin the pipeline that made the ConsumeLogs/Trace/Metrics function call.\nThe preceding component should normally be a receiver.\n\nIn memory-limited mode the error returned by the ConsumeLogs/Trace/Metrics function is a\nnon-permanent error. When receivers see this error they are expected to retry sending\nthe same data. The receivers may also apply backpressure to their data sources\nin order to slow down the inflow of data into the Collector and allow the memory usage\nto go below the limits.\n\n\u003eWarning: if the component preceding the memory limiter in the pipeline does not correctly\nretry and resend the data after the ConsumeLogs/Trace/Metrics functions return an error, that\ndata will be permanently lost. We consider such components incorrectly implemented.\n\nWhen the memory usage is above the hard limit, in addition to refusing data the\nprocessor will forcibly perform garbage collection in order to try to free memory.\n\nWhen the memory usage drops below the soft limit, normal operation is resumed (data\nwill no longer be refused and no forced garbage collection will be performed).\n\nThe difference between the soft and hard limits is defined via the `spike_limit_mib`\nconfiguration option. The value of this option should be selected in a way that ensures\nthat between the memory check intervals the memory usage cannot increase by more than this\nvalue (otherwise memory usage may exceed the hard limit, even if temporarily).\nA good starting point for `spike_limit_mib` is 20% of the hard limit. 
Bigger\n`spike_limit_mib` values may be necessary for spiky traffic or for longer check intervals.\n\nNote that while the processor can help mitigate out of memory situations,\nit is not a replacement for properly sizing and configuring the\ncollector. Keep in mind that if the soft limit is crossed, the collector will\nreturn errors to all receive operations until enough memory is freed. This may\neventually result in dropped data since the receivers may not be able to hold back\nand retry the data indefinitely.\n\nIt is highly recommended to configure `ballastextension` as well as the\n`memory_limiter` processor on every collector. The ballast should be configured to\nbe 1/3 to 1/2 of the memory allocated to the collector. The memory_limiter\nprocessor should be the first processor defined in the pipeline (immediately after\nthe receivers). This is to ensure that backpressure can be sent to applicable\nreceivers and minimize the likelihood of dropped data when the memory_limiter gets\ntriggered.\n\nPlease refer to [config.go](./config.go) for the config spec.\n\nThe following configuration options **must be changed**:\n- `check_interval` (default = 0s): Time between measurements of memory\nusage. The recommended value is 1 second.\nIf the expected traffic to the Collector is very spiky then decrease the `check_interval`\nor increase `spike_limit_mib` to avoid memory usage going over the hard limit.\n- `limit_mib` (default = 0): Maximum amount of memory, in MiB, targeted to be\nallocated by the process heap. Note that typically the total memory usage of\nprocess will be about 50MiB higher than this value. This defines the hard limit.\n- `spike_limit_mib` (default = 20% of `limit_mib`): Maximum spike expected between the\nmeasurements of memory usage. The value must be less than `limit_mib`. The soft limit\nvalue will be equal to (limit_mib - spike_limit_mib).\nThe recommended value for `spike_limit_mib` is about 20% `limit_mib`.\n- `limit_percentage` (default = 0): Maximum amount of total memory targeted to be\nallocated by the process heap. This configuration is supported on Linux systems with cgroups\nand it's intended to be used in dynamic platforms like docker.\nThis option is used to calculate `memory_limit` from the total available memory.\nFor instance setting of 75% with the total memory of 1GiB will result in the limit of 750 MiB.\nThe fixed memory setting (`limit_mib`) takes precedence\nover the percentage configuration.\n- `spike_limit_percentage` (default = 0): Maximum spike expected between the\nmeasurements of memory usage. 
The value must be less than `limit_percentage`.\nThis option is used to calculate `spike_limit_mib` from the total available memory.\nFor instance setting of 25% with the total memory of 1GiB will result in the spike limit of 250MiB.\nThis option is intended to be used only with `limit_percentage`.\n\nExamples:\n\n```yaml\nprocessors:\n memory_limiter:\n check_interval: 1s\n limit_mib: 4000\n spike_limit_mib: 800\n```\n\n```yaml\nprocessors:\n memory_limiter:\n check_interval: 1s\n limit_percentage: 50\n spike_limit_percentage: 30\n```\n\nRefer to [config.yaml](./testdata/config.yaml) for detailed\nexamples on using the processor.\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol","properties":{"check_interval":{"description":"CheckInterval is the time between measurements of memory usage for the\npurposes of avoiding going over the limits. Defaults to zero, so no\nchecks will be performed.","title":"check_interval","type":"string"},"limit_mib":{"description":"MemoryLimitMiB is the maximum amount of memory, in MiB, targeted to be\nallocated by the process.","title":"limit_mib","type":"integer"},"limit_percentage":{"description":"MemoryLimitPercentage is the maximum amount of memory, in %, targeted to be\nallocated by the process. The fixed memory settings MemoryLimitMiB has a higher precedence.","title":"limit_percentage","type":"integer"},"spike_limit_mib":{"description":"MemorySpikeLimitMiB is the maximum, in MiB, spike expected between the\nmeasurements of memory usage.","title":"spike_limit_mib","type":"integer"},"spike_limit_percentage":{"description":"MemorySpikePercentage is the maximum, in percents against the total memory,\nspike expected between the measurements of memory usage.","title":"spike_limit_percentage","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.receiver.otlpreceiver.Config":{"additionalProperties":false,"description":"Config defines configuration for OTLP receiver.","properties":{"protocols":{"$ref":"#/$defs/go.opentelemetry.io.collector.receiver.otlpreceiver.Protocols","title":"protocols"}},"type":"object"},"go.opentelemetry.io.collector.receiver.otlpreceiver.Protocols":{"additionalProperties":false,"description":"Protocols is the configuration for the supported protocols.","markdownDescription":"# OTLP Receiver\n\n| Status | |\n| ------------------------ | --------------------- |\n| Stability | traces [stable] |\n| | metrics [stable] |\n| | logs [beta] |\n| Supported pipeline types | traces, metrics, logs |\n| Distributions | [core], [contrib] |\n\nReceives data via gRPC or HTTP using [OTLP](\nhttps://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md)\nformat.\n\n## Getting Started\n\nAll that is required to enable the OTLP receiver is to include it in the\nreceiver definitions. A protocol can be disabled by simply not specifying it in\nthe list of protocols.\n\n```yaml\nreceivers:\n otlp:\n protocols:\n grpc:\n http:\n```\n\nThe following settings are configurable:\n\n- `endpoint` (default = 0.0.0.0:4317 for grpc protocol, 0.0.0.0:4318 http protocol):\n host:port to which the receiver is going to receive data. 
The valid syntax is\n described at https://github.com/grpc/grpc/blob/master/doc/naming.md.\n\n## Advanced Configuration\n\nSeveral helper files are leveraged to provide additional capabilities automatically:\n\n- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) including CORS\n- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)\n\n## Writing with HTTP/JSON\n\nThe OTLP receiver can receive telemetry export calls via HTTP/JSON in addition to\ngRPC. The HTTP/JSON endpoint is configured separately from gRPC under the `http`\nprotocol. Note that the serialization format needs to be [protobuf JSON](https://developers.google.com/protocol-buffers/docs/proto3#json).\n\nTo write with HTTP/JSON, `POST` to `[address]/v1/traces` for traces,\nto `[address]/v1/metrics` for metrics, or to `[address]/v1/logs` for logs. The default\nport is `4318`.\n\n### CORS (Cross-origin resource sharing)\n\nThe HTTP/JSON endpoint can also optionally be configured with [CORS][cors] under `cors:`.\nSpecify which origins (or wildcard patterns) to allow requests from via\n`allowed_origins`. To allow additional request headers outside of the [default\nsafelist][cors-headers], set `allowed_headers`. Browsers can be instructed to\n[cache][cors-max-age] responses to preflight requests by setting `max_age`.\n\n[cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS\n[cors-headers]: https://developer.mozilla.org/en-US/docs/Glossary/CORS-safelisted_request_header\n[cors-max-age]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age\n\n```yaml\nreceivers:\n  otlp:\n    protocols:\n      http:\n        endpoint: \"localhost:4318\"\n        cors:\n          allowed_origins:\n            - http://test.com\n            # Origins can have wildcards with *, use * by itself to match any origin.\n            - https://*.example.com\n          allowed_headers:\n            - Example-Header\n          max_age: 7200\n```\n\n[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta\n[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib\n[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol\n[stable]: 
https://github.com/open-telemetry/opentelemetry-collector#stable","properties":{"grpc":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.configgrpc.GRPCServerSettings","title":"grpc"},"http":{"$ref":"#/$defs/go.opentelemetry.io.collector.config.confighttp.HTTPServerSettings","title":"http"}},"type":"object"},"go.opentelemetry.io.collector.service.Config":{"additionalProperties":false,"properties":{"extensions":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.extensions.Config","title":"extensions"},"pipelines":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.pipelines.Config","title":"pipelines"},"telemetry":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.telemetry.Config","title":"telemetry"}},"type":"object"},"go.opentelemetry.io.collector.service.extensions.Config":{"items":{"type":"string"},"type":"array"},"go.opentelemetry.io.collector.service.pipelines.Config":{"patternProperties":{".*":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.pipelines.PipelineConfig"}},"type":"object"},"go.opentelemetry.io.collector.service.pipelines.PipelineConfig":{"additionalProperties":false,"properties":{"exporters":{"items":{"type":"string"},"title":"exporters","type":"array"},"processors":{"items":{"type":"string"},"title":"processors","type":"array"},"receivers":{"items":{"type":"string"},"title":"receivers","type":"array"}},"type":"object"},"go.opentelemetry.io.collector.service.telemetry.Config":{"additionalProperties":false,"properties":{"logs":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.telemetry.LogsConfig","title":"logs"},"metrics":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.telemetry.MetricsConfig","title":"metrics"},"resource":{"patternProperties":{".*":{"type":"string"}},"title":"resource","type":"object"},"traces":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.telemetry.TracesConfig","title":"traces"}},"type":"object"},"go.opentelemetry.io.collector.service.telemetry.LogsConfig":{"additionalProperties":false,"properties":{"development":{"title":"development","type":"boolean"},"disable_caller":{"title":"disable_caller","type":"boolean"},"disable_stacktrace":{"title":"disable_stacktrace","type":"boolean"},"encoding":{"title":"encoding","type":"string"},"error_output_paths":{"items":{"type":"string"},"title":"error_output_paths","type":"array"},"initial_fields":{"patternProperties":{".*":true},"title":"initial_fields","type":"object"},"level":{"title":"level","type":"integer"},"output_paths":{"items":{"type":"string"},"title":"output_paths","type":"array"},"sampling":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.telemetry.LogsSamplingConfig","title":"sampling"}},"type":"object"},"go.opentelemetry.io.collector.service.telemetry.LogsSamplingConfig":{"additionalProperties":false,"properties":{"initial":{"title":"initial","type":"integer"},"thereafter":{"title":"thereafter","type":"integer"}},"type":"object"},"go.opentelemetry.io.collector.service.telemetry.MetricReader":{"additionalProperties":false,"properties":{"args":{"title":"args"},"type":{"title":"type","type":"string"}},"type":"object"},"go.opentelemetry.io.collector.service.telemetry.MetricsConfig":{"additionalProperties":false,"properties":{"address":{"title":"address","type":"string"},"level":{"title":"level","type":"integer"},"metric_readers":{"items":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.telemetry.MetricReader"},"title":"metric_readers","type":"array"}},"type":"object"},"go.opentelemetry.io.collector.service.telemetry.TracesConfig":{"additionalPropert
ies":false,"properties":{"propagators":{"items":{"type":"string"},"title":"propagators","type":"array"}},"type":"object"},"net.url.Values":{"patternProperties":{".*":{"items":{"type":"string"},"type":"array"}},"type":"object"}},"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{"exporters":{"minProperties":1,"patternProperties":{"^alibabacloudlogservice(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.alibabacloudlogserviceexporter.Config"},"^awscloudwatchlogs(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awscloudwatchlogsexporter.Config"},"^awsemf(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsemfexporter.Config"},"^awskinesis(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awskinesisexporter.Config"},"^awss3(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awss3exporter.Config"},"^awsxray(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.awsxrayexporter.Config"},"^azuredataexplorer(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.azuredataexplorerexporter.Config"},"^azuremonitor(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.azuremonitorexporter.Config"},"^carbon(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.carbonexporter.Config"},"^cassandra(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.cassandraexporter.Config"},"^clickhouse(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.clickhouseexporter.Config"},"^coralogix(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.coralogixexporter.Config"},"^datadog(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datadogexporter.Config"},"^dataset(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.datasetexporter.Config"},"^dynatrace(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.dynatraceexporter.config.Config"},"^elasticsearch(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.elasticsearchexporter.Config"},"^f5cloud(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.f5cloudexporter.Config"},"^file(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.fileexporter.Config"},"^googlecloud(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlecloudexporter.Config"},"^googlecloudpubsub(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlecloudpubsubexporter.Config"},"^googlemanagedprometheus(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.googlemanagedprometheusexporter.Config"},"^influxdb(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.influxdbexporter.Config"},"^instana(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.instanaexporter.Config"},"^jaeger(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.open
telemetry-collector-contrib.exporter.jaegerexporter.Config"},"^jaegerthrift(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.jaegerthrifthttpexporter.Config"},"^kafka(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.kafkaexporter.Config"},"^loadbalancing(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.loadbalancingexporter.Config"},"^logicmonitor(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.logicmonitorexporter.Config"},"^logzio(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.logzioexporter.Config"},"^loki(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.lokiexporter.Config"},"^mezmo(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.mezmoexporter.Config"},"^opencensus(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.opencensusexporter.Config"},"^parquet(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.parquetexporter.Config"},"^prometheus(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusexporter.Config"},"^prometheusremotewrite(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.prometheusremotewriteexporter.Config"},"^pulsar(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.pulsarexporter.Config"},"^sapm(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.sapmexporter.Config"},"^sentry(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.sentryexporter.Config"},"^signalfx(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.signalfxexporter.Config"},"^skywalking(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.skywalkingexporter.Config"},"^splunkhec(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.splunkhecexporter.Config"},"^sumologic(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.sumologicexporter.Config"},"^tanzuobservability(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tanzuobservabilityexporter.Config"},"^tencentcloudlogservice(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.tencentcloudlogserviceexporter.Config"},"^zipkin(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.exporter.zipkinexporter.Config"}},"type":"object"},"extensions":{"minProperties":1,"patternProperties":{"^asapauth(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.asapauthextension.Config"},"^awsproxy(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.awsproxy.Config"},"^ballast(\\/.+)?$":{"$ref":"#/$defs/go.opentelemetry.io.collector.extension.ballastextension.Config"},"^basicauth(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.basicauthextension.Config"},"^bearertokenauth(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.
extension.bearertokenauthextension.Config"},"^dbstorage(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.storage.dbstorage.Config"},"^dockerobserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.dockerobserver.Config"},"^ecsobserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecsobserver.Config"},"^ecstaskobserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.ecstaskobserver.Config"},"^filestorage(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.storage.filestorage.Config"},"^headerssetter(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.headerssetterextension.Config"},"^healthcheck(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.healthcheckextension.Config"},"^hostobserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.hostobserver.Config"},"^httpforwarder(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.httpforwarder.Config"},"^jaegerremotesampling(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.jaegerremotesampling.Config"},"^k8sobserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.observer.k8sobserver.Config"},"^oauth2clientauth(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.oauth2clientauthextension.Config"},"^oidcauth(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.oidcauthextension.Config"},"^pprof(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.pprofextension.Config"},"^sigv4auth(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.extension.sigv4authextension.Config"},"^zpages(\\/.+)?$":{"$ref":"#/$defs/go.opentelemetry.io.collector.extension.zpagesextension.Config"}},"type":"object"},"processors":{"minProperties":1,"patternProperties":{"^attributes(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.attributesprocessor.Config"},"^batch(\\/.+)?$":{"$ref":"#/$defs/go.opentelemetry.io.collector.processor.batchprocessor.Config"},"^cumulativetodelta(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.cumulativetodeltaprocessor.Config"},"^datadog(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.datadogprocessor.Config"},"^deltatorate(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.deltatorateprocessor.Config"},"^filter(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.filterprocessor.Config"},"^groupbyattrs(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.groupbyattrsprocessor.Config"},"^groupbytrace(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.groupbytraceprocessor.Config"},"^k8sattributes(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.k8sattributesprocessor.Config"},"^memorylimiter(\\/.+)?$":{"$ref":"#/
$defs/go.opentelemetry.io.collector.processor.memorylimiterprocessor.Config"},"^metricsgeneration(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricsgenerationprocessor.Config"},"^metricstransform(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.metricstransformprocessor.Config"},"^probabilisticsampler(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.probabilisticsamplerprocessor.Config"},"^redaction(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.redactionprocessor.Config"},"^remoteobserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.remoteobserverprocessor.Config"},"^resource(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourceprocessor.Config"},"^resourcedetection(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.resourcedetectionprocessor.Config"},"^routing(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.routingprocessor.Config"},"^schemaprocessor(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.schemaprocessor.Config"},"^servicegraph(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.servicegraphprocessor.Config"},"^span(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanprocessor.Config"},"^spanmetrics(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.spanmetricsprocessor.Config"},"^tailsampling(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.tailsamplingprocessor.Config"},"^transform(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.processor.transformprocessor.Config"}},"type":"object"},"receivers":{"minProperties":1,"patternProperties":{"^activedirectoryds(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.activedirectorydsreceiver.Config"},"^aerospikereceiver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.aerospikereceiver.Config"},"^apache(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachereceiver.Config"},"^apachespark(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.apachesparkreceiver.Config"},"^awscloudwatch(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscloudwatchreceiver.Config"},"^awscontainerinsight(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awscontainerinsightreceiver.Config"},"^awsecscontainermetrics(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awsecscontainermetricsreceiver.Config"},"^awsfirehose(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awsfirehosereceiver.Config"},"^awsxray(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.awsxrayreceiver.Config"},"^azureblob(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureblobreceiver.Config"},"^azureeventhub(\\/.+)?$":{"$r
ef":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azureeventhubreceiver.Config"},"^azuremonitor(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.azuremonitorreceiver.Config"},"^bigip(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.bigipreceiver.Config"},"^carbon(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.carbonreceiver.Config"},"^chrony(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.chronyreceiver.Config"},"^cloudflare(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudflarereceiver.Config"},"^cloudfoundry(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.cloudfoundryreceiver.Config"},"^collectd(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.collectdreceiver.Config"},"^couchdb(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.couchdbreceiver.Config"},"^datadog(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.datadogreceiver.Config"},"^dockerstats(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dockerstatsreceiver.Config"},"^dotnetdiagnostics(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.dotnetdiagnosticsreceiver.Config"},"^elasticsearch(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.elasticsearchreceiver.Config"},"^expvar(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.expvarreceiver.Config"},"^file(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filereceiver.Config"},"^filelog(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filelogreceiver.FileLogConfig"},"^filestats(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.filestatsreceiver.Config"},"^flinkmetrics(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.flinkmetricsreceiver.Config"},"^fluentforward(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.fluentforwardreceiver.Config"},"^googlecloudpubsub(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudpubsubreceiver.Config"},"^googlecloudspanner(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.googlecloudspannerreceiver.Config"},"^haproxy(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.haproxyreceiver.Config"},"^hostmetrics(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.hostmetricsreceiver.Config"},"^httpcheck(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.httpcheckreceiver.Config"},"^iis(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.iisreceiver.Config"},"^influxdb(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.influxdbreceiver.Config"},"^jaeger(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-
contrib.receiver.jaegerreceiver.Config"},"^jmx(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.jmxreceiver.Config"},"^journald(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.journaldreceiver.JournaldConfig"},"^k8scluster(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8sclusterreceiver.Config"},"^k8sevents(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8seventsreceiver.Config"},"^k8sobjects(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.k8sobjectsreceiver.Config"},"^kafka(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkareceiver.Config"},"^kafkametrics(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kafkametricsreceiver.Config"},"^kubeletstats(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.kubeletstatsreceiver.Config"},"^loki(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.lokireceiver.Config"},"^memcached(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.memcachedreceiver.Config"},"^mongodb(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbreceiver.Config"},"^mongodbatlas(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mongodbatlasreceiver.Config"},"^mysql(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.mysqlreceiver.Config"},"^nginx(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nginxreceiver.Config"},"^nsxt(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.nsxtreceiver.Config"},"^opencensus(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.opencensusreceiver.Config"},"^oracledb(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.oracledbreceiver.Config"},"^otlp(\\/.+)?$":{"$ref":"#/$defs/go.opentelemetry.io.collector.receiver.otlpreceiver.Config"},"^otlpjsonfile(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.otlpjsonfilereceiver.Config"},"^podman(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.podmanreceiver.Config"},"^postgresql(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.postgresqlreceiver.Config"},"^prometheus(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusreceiver.Config"},"^prometheusexec(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.prometheusexecreceiver.Config"},"^pulsar(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.pulsarreceiver.Config"},"^purefa(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefareceiver.Config"},"^purefb(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.purefbreceiver.Config"},"^rabbitmq(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.rabbitmqreceiver.Config"}
,"^receivercreator(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.receivercreator.Config"},"^redis(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.redisreceiver.Config"},"^riak(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.riakreceiver.Config"},"^sapm(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sapmreceiver.Config"},"^signalfx(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.signalfxreceiver.Config"},"^simpleprometheus(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.simpleprometheusreceiver.Config"},"^skywalking(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.skywalkingreceiver.Config"},"^snowflake(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.snowflakereceiver.Config"},"^solace(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.solacereceiver.Config"},"^splunkhec(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.splunkhecreceiver.Config"},"^sqlquery(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlqueryreceiver.Config"},"^sqlserver(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sqlserverreceiver.Config"},"^sshcheck(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.sshcheckreceiver.Config"},"^statsd(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.statsdreceiver.Config"},"^vcenter(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.vcenterreceiver.Config"},"^wavefront(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.wavefrontreceiver.Config"},"^windowseventlog(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowseventlogreceiver.WindowsLogConfig"},"^windowsperfcounters(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.windowsperfcountersreceiver.Config"},"^zipkin(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zipkinreceiver.Config"},"^zookeeper(\\/.+)?$":{"$ref":"#/$defs/github.com.open-telemetry.opentelemetry-collector-contrib.receiver.zookeeperreceiver.Config"}},"type":"object"},"service":{"$ref":"#/$defs/go.opentelemetry.io.collector.service.Config"}},"required":["receivers","exporters","service"],"title":"OpenTelemetry Collector Config Schema","type":"object"}