######################## Filebeat Configuration ############################

# This file is a full configuration example documenting all non-deprecated
# options in comments. For a shorter configuration example, that contains only
# the most common options, please see filebeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

#========================== Modules configuration =============================
filebeat.modules:

#-------------------------------- System Module --------------------------------
#- module: system
  # Syslog
  #syslog:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

  # Authorization logs
  #auth:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

#------------------------------- ActiveMQ Module -------------------------------
- module: activemq
  # Audit logs
  audit:
    enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

  # Application logs
  log:
    enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

#-------------------------------- Apache Module --------------------------------
#- module: apache
  # Access logs
  #access:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
#input: # Error logs #error: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: #-------------------------------- Auditd Module -------------------------------- #- module: auditd #log: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: #--------------------------------- AWS Module --------------------------------- - module: aws cloudtrail: enabled: false # AWS SQS queue url #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue # AWS S3 bucket arn #var.bucket_arn: 'arn:aws:s3:::mybucket' # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s # Number of workers on S3 bucket #var.number_of_workers: 5 # Process CloudTrail logs # default is true, set to false to skip Cloudtrail logs # var.process_cloudtrail_logs: false # Process CloudTrail Digest logs # default true, set to false to skip CloudTrail Digest logs # var.process_digest_logs: false # Process CloudTrail Insight logs # default true, set to false to skip CloudTrail Insight logs # var.process_insight_logs: false # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # The duration that the received messages are hidden from ReceiveMessage request # 
Default to be 300s #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 cloudwatch: enabled: false # AWS SQS queue url #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue # AWS S3 bucket arn #var.bucket_arn: 'arn:aws:s3:::mybucket' # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s # Number of workers on S3 bucket #var.number_of_workers: 5 # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # The duration that the received messages are hidden from ReceiveMessage request # Default to be 300s #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
#var.max_number_of_messages: 5 ec2: enabled: false # AWS SQS queue url #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue # AWS S3 bucket arn #var.bucket_arn: 'arn:aws:s3:::mybucket' # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s # Number of workers on S3 bucket #var.number_of_workers: 5 # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # The duration that the received messages are hidden from ReceiveMessage request # Default to be 300s #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
#var.max_number_of_messages: 5 elb: enabled: false # AWS SQS queue url #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue # AWS S3 bucket arn #var.bucket_arn: 'arn:aws:s3:::mybucket' # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s # Number of workers on S3 bucket #var.number_of_workers: 5 # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # The duration that the received messages are hidden from ReceiveMessage request # Default to be 300s #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
#var.max_number_of_messages: 5 s3access: enabled: false # AWS SQS queue url #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue # AWS S3 bucket arn #var.bucket_arn: 'arn:aws:s3:::mybucket' # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s # Number of workers on S3 bucket #var.number_of_workers: 5 # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # The duration that the received messages are hidden from ReceiveMessage request # Default to be 300s #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
#var.max_number_of_messages: 5 vpcflow: enabled: false # AWS SQS queue url #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue # AWS S3 bucket arn #var.bucket_arn: 'arn:aws:s3:::mybucket' # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s # Number of workers on S3 bucket #var.number_of_workers: 5 # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # The duration that the received messages are hidden from ReceiveMessage request # Default to be 300s #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
#var.max_number_of_messages: 5 #----------------------------- AWS Fargate Module ----------------------------- - module: awsfargate log: enabled: false # Filename of AWS credential file # If not set "$HOME/.aws/credentials" is used on Linux/Mac # "%UserProfile%\.aws\credentials" is used on Windows #var.shared_credential_file: /etc/filebeat/aws_credentials # Profile name for aws credential # If not set the default profile is used #var.credential_profile_name: fb-aws # Use access_key_id, secret_access_key and/or session_token instead of shared credential file #var.access_key_id: access_key_id #var.secret_access_key: secret_access_key #var.session_token: session_token # Maximum duration before AWS API request will be interrupted # Default to be 120s #var.api_timeout: 120s # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb # ARN of the log group to collect logs from #var.log_group_arn: arn:aws:logs:us-east-1:123456789012:log-group:test:* # Name of the log group to collect logs from #var.log_group_name: /ecs/test-log-group # Region that the specified log group belongs to #var.region_name: us-east-1 # A list of strings of log streams names that Filebeat collect log events from #var.log_streams: ["/ecs/test-log-group/test-log-stream"] # A string to filter the results to include only log events from log streams that have names starting with this prefix #var.log_stream_prefix: /ecs/test-log-group/ # Specify if Filebeat should read log files from the beginning or from the end # Default start_position is beginning #var.start_position: beginning # How often Filebeat checks for new log events from the specified log group # Default scan_frequency is 1 minute #var.scan_frequency: 1m # Time used to sleep between AWS FilterLogEvents API calls inside the same collection period # Default api_sleep is 200 ms #var.api_sleep: 200ms #-------------------------------- Azure Module 
-------------------------------- - module: azure # All logs activitylogs: enabled: true var: # eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub eventhub: "insights-operational-logs" # consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module consumer_group: "$Default" # the connection string required to communicate with Event Hubs, steps to generate one here https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string connection_string: "" # the name of the storage account the state/offsets will be stored and updated storage_account: "" # the storage account key, this key will be used to authorize access to data in your storage account storage_account_key: "" platformlogs: enabled: false # var: # eventhub: "" # consumer_group: "$Default" # connection_string: "" # storage_account: "" # storage_account_key: "" auditlogs: enabled: false # var: # eventhub: "insights-logs-auditlogs" # consumer_group: "$Default" # connection_string: "" # storage_account: "" # storage_account_key: "" signinlogs: enabled: false # var: # eventhub: "insights-logs-signinlogs" # consumer_group: "$Default" # connection_string: "" # storage_account: "" # storage_account_key: "" #------------------ Barracuda Web Application Firewall Module ------------------ - module: barracuda waf: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9503 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local spamfirewall: enabled: true # Set which input to use between udp (default), tcp or file. 
# var.input: udp # var.syslog_host: localhost # var.syslog_port: 9524 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #-------------------------- Blue Coat Director Module -------------------------- - module: bluecoat director: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9505 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #--------------------------------- CEF Module --------------------------------- - module: cef log: enabled: true var: syslog_host: localhost syslog_port: 9003 # Set internal security zones. used to override parsed network.direction # based on zone egress and ingress #var.internal_zones: [ "Internal" ] # Set external security zones. used to override parsed network.direction # based on zone egress and ingress #var.external_zones: [ "External" ] #------------------------------ Checkpoint Module ------------------------------ - module: checkpoint firewall: enabled: true # Set which input to use between syslog (default) or file. #var.input: syslog # The interface to listen to UDP based syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The UDP port to listen for syslog traffic. Defaults to 9001. #var.syslog_port: 9001 # Set internal security zones. used to override parsed network.direction # based on zone egress and ingress #var.internal_zones: [ "Internal" ] # Set external security zones. 
used to override parsed network.direction # based on zone egress and ingress #var.external_zones: [ "External" ] #-------------------------------- Cisco Module -------------------------------- - module: cisco asa: enabled: true # Set which input to use between syslog (default) or file. #var.input: syslog # The interface to listen to UDP based syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The UDP port to listen for syslog traffic. Defaults to 9001. #var.syslog_port: 9001 # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/asa/syslog/b_syslog/syslogs-sev-level.html #var.log_level: 7 # Set internal security zones. used to override parsed network.direction # based on zone egress and ingress #var.internal_zones: [ "Internal" ] # Set external security zones. used to override parsed network.direction # based on zone egress and ingress #var.external_zones: [ "External" ] ftd: enabled: true # Set which input to use between syslog (default) or file. #var.input: syslog # The interface to listen to UDP based syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The UDP port to listen for syslog traffic. Defaults to 9003. #var.syslog_port: 9003 # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/firepower/Syslogs/b_fptd_syslog_guide/syslogs-sev-level.html #var.log_level: 7 # Set internal security zones. used to override parsed network.direction # based on zone egress and ingress #var.internal_zones: [ "Internal" ] # Set external security zones. 
used to override parsed network.direction # based on zone egress and ingress #var.external_zones: [ "External" ] ios: enabled: true # Set which input to use between syslog (default) or file. #var.input: syslog # The interface to listen to UDP based syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The UDP port to listen for syslog traffic. Defaults to 9002. #var.syslog_port: 9002 # Set custom paths for the log files when using file input. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: nexus: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9506 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local meraki: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9525 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local umbrella: enabled: true #var.input: aws-s3 # AWS SQS queue url #var.queue_url: https://sqs.us-east-1.amazonaws.com/ID/CiscoQueue # Access ID to authenticate with the S3 input #var.access_key_id: 123456 # Access key to authenticate with the S3 input #var.secret_access_key: PASSWORD # The duration that the received messages are hidden from ReceiveMessage request #var.visibility_timeout: 300s # Maximum duration before AWS API request will be interrupted #var.api_timeout: 120s amp: enabled: true # Set which input to use between httpjson (default) or file. 
#var.input: httpjson # The API URL #var.url: https://api.amp.cisco.com/v1/events # The client ID used as a username for the API requests. #var.client_id: # The API key related to the client ID. #var.api_key: # How far to look back the first time the module is started. Expects an amount of hours. #var.first_interval: 24h # Overriding the default request timeout, optional. #var.request_timeout: 60s #------------------------------- Coredns Module ------------------------------- - module: coredns # Fileset for native deployment log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #----------------------------- Crowdstrike Module ----------------------------- - module: crowdstrike falcon: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #------------------------------ Cyber-Ark Module ------------------------------ # The cyberark module is deprecated and will be removed in future releases. # Please use the Cyberark Privileged Account Security (cyberarkpas) module instead. - module: cyberark corepas: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9527 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #----------------------------- CyberArk PAS Module ----------------------------- - module: cyberarkpas audit: enabled: true # Set which input to use between tcp (default), udp, or file. 
# # var.input: tcp # var.syslog_host: localhost # var.syslog_port: 9301 # With tcp input, set the optional tls configuration: #var.ssl: # enabled: true # certificate: /path/to/cert.pem # key: /path/to/privatekey.pem # key_passphrase: 'password for my key' # Uncoment to keep the original syslog event under event.original. # var.preserve_original_event: true # Set paths for the log files when file input is used. # var.paths: #---------------------------- CylanceProtect Module ---------------------------- - module: cylance protect: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9508 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #---------------------------- Elasticsearch Module ---------------------------- - module: elasticsearch # Server log server: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: gc: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: audit: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: slowlog: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: deprecation: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #------------------------------ Envoyproxy Module ------------------------------ - module: envoyproxy # Fileset for native deployment log: enabled: true # Set custom paths for the log files. 
If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #--------------------- Big-IP Access Policy Manager Module --------------------- - module: f5 bigipapm: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9504 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local bigipafm: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9528 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #------------------------------- Fortinet Module ------------------------------- - module: fortinet firewall: enabled: true # Set which input to use between tcp, udp (default) or file. #var.input: udp # The interface to listen to syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The port to listen for syslog traffic. Defaults to 9004. #var.syslog_port: 9004 # Set internal interfaces. used to override parsed network.direction # based on a tagged interface. Both internal and external interfaces must be # set to leverage this functionality. #var.internal_interfaces: [ "LAN" ] # Set external interfaces. used to override parsed network.direction # based on a tagged interface. Both internal and external interfaces must be # set to leverage this functionality. #var.external_interfaces: [ "WAN" ] clientendpoint: enabled: true # Set which input to use between udp (default), tcp or file. 
# var.input: udp # var.syslog_host: localhost # var.syslog_port: 9510 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local fortimail: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9529 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local fortimanager: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9530 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #--------------------- Google Cloud Platform (GCP) Module --------------------- - module: gcp vpcflow: enabled: true # Google Cloud project ID. var.project_id: my-gcp-project-id # Google Pub/Sub topic containing VPC flow logs. Stackdriver must be # configured to use this topic as a sink for VPC flow logs. var.topic: gcp-vpc-flowlogs # Google Pub/Sub subscription for the topic. Filebeat will create this # subscription if it does not exist. var.subscription_name: filebeat-gcp-vpc-flowlogs-sub # Credentials file for the service account with authorization to read from # the subscription. var.credentials_file: ${path.config}/gcp-service-account-xyz.json # Set internal networks. 
This is used to classify network.direction based # off of what networks are considered "internal" either base off of a CIDR # block or named network conditions. If this is not specified, then traffic # direction is determined by whether it is between source and destination # instance information rather than IP. # # For a full list of network conditions see: # https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network #var.internal_networks: [ "private" ] firewall: enabled: true # Google Cloud project ID. var.project_id: my-gcp-project-id # Google Pub/Sub topic containing firewall logs. Stackdriver must be # configured to use this topic as a sink for firewall logs. var.topic: gcp-vpc-firewall # Google Pub/Sub subscription for the topic. Filebeat will create this # subscription if it does not exist. var.subscription_name: filebeat-gcp-firewall-sub # Credentials file for the service account with authorization to read from # the subscription. var.credentials_file: ${path.config}/gcp-service-account-xyz.json # Set internal networks. This is used to classify network.direction based # off of what networks are considered "internal" either base off of a CIDR # block or named network conditions. If this is not specified, then traffic # is taken from the direction data in the rule_details event payload. # # For a full list of network conditions see: # https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network #var.internal_networks: [ "private" ] audit: enabled: true # Google Cloud project ID. var.project_id: my-gcp-project-id # Google Pub/Sub topic containing firewall logs. Stackdriver must be # configured to use this topic as a sink for firewall logs. var.topic: gcp-vpc-audit # Google Pub/Sub subscription for the topic. Filebeat will create this # subscription if it does not exist. 
var.subscription_name: filebeat-gcp-audit # Credentials file for the service account with authorization to read from # the subscription. var.credentials_file: ${path.config}/gcp-service-account-xyz.json #--------------------------- Google_workspace Module --------------------------- - module: google_workspace saml: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h user_accounts: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h login: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h admin: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h drive: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h groups: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h #----------------------------- Googlecloud Module ----------------------------- # googlecloud module is deprecated, please use gcp instead - module: gcp vpcflow: enabled: true # Google Cloud project ID. var.project_id: my-gcp-project-id # Google Pub/Sub topic containing VPC flow logs. Stackdriver must be # configured to use this topic as a sink for VPC flow logs. var.topic: gcp-vpc-flowlogs # Google Pub/Sub subscription for the topic. Filebeat will create this # subscription if it does not exist. 
var.subscription_name: filebeat-gcp-vpc-flowlogs-sub # Credentials file for the service account with authorization to read from # the subscription. var.credentials_file: ${path.config}/gcp-service-account-xyz.json firewall: enabled: true # Google Cloud project ID. var.project_id: my-gcp-project-id # Google Pub/Sub topic containing firewall logs. Stackdriver must be # configured to use this topic as a sink for firewall logs. var.topic: gcp-vpc-firewall # Google Pub/Sub subscription for the topic. Filebeat will create this # subscription if it does not exist. var.subscription_name: filebeat-gcp-firewall-sub # Credentials file for the service account with authorization to read from # the subscription. var.credentials_file: ${path.config}/gcp-service-account-xyz.json audit: enabled: true # Google Cloud project ID. var.project_id: my-gcp-project-id # Google Pub/Sub topic containing audit logs. Stackdriver must be # configured to use this topic as a sink for audit logs. var.topic: gcp-vpc-audit # Google Pub/Sub subscription for the topic. Filebeat will create this # subscription if it does not exist. var.subscription_name: filebeat-gcp-audit # Credentials file for the service account with authorization to read from # the subscription. var.credentials_file: ${path.config}/gcp-service-account-xyz.json #-------------------------------- Gsuite Module -------------------------------- # Gsuite module is deprecated and will be removed in future releases. Please use Google Workspace module instead. 
- module: gsuite saml: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h user_accounts: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h login: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h admin: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h drive: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h groups: enabled: true # var.jwt_file: credentials.json # var.delegated_account: admin@example.com # var.initial_interval: 24h # var.http_client_timeout: 60s # var.user_key: all # var.interval: 2h #------------------------------- HAProxy Module ------------------------------- - module: haproxy # All logs log: enabled: true # Set which input to use between syslog (default) or file. #var.input: # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #-------------------------------- Ibmmq Module -------------------------------- - module: ibmmq # All logs errorlog: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #-------------------------------- Icinga Module -------------------------------- #- module: icinga # Main logs #main: #enabled: true # Set custom paths for the log files. 
If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: # Debug logs #debug: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: # Startup logs #startup: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: #--------------------------------- IIS Module --------------------------------- #- module: iis # Access logs #access: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: # Error logs #error: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: #------------------------- Imperva SecureSphere Module ------------------------- - module: imperva securesphere: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9511 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. 
# "+02:00" for GMT+02:00 # var.tz_offset: local #---------------------------- Infoblox NIOS Module ---------------------------- - module: infoblox nios: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9512 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #------------------------------- Iptables Module ------------------------------- - module: iptables log: enabled: true # Set which input to use between syslog (default) or file. #var.input: # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #---------------------------- Juniper JUNOS Module ---------------------------- - module: juniper junos: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9513 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local netscreen: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9523 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local srx: enabled: true # Set which input to use between tcp, udp (default) or file. #var.input: udp # The interface to listen to syslog traffic. Defaults to # localhost. 
Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The port to listen for syslog traffic. Defaults to 9006. #var.syslog_port: 9006 #-------------------------------- Kafka Module -------------------------------- - module: kafka # All logs log: enabled: true # Set custom paths for Kafka. If left empty, # Filebeat will look under /opt. #var.kafka_home: # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #-------------------------------- Kibana Module -------------------------------- - module: kibana # Server logs log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Audit logs audit: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #------------------------------- Logstash Module ------------------------------- #- module: logstash # logs #log: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. # var.paths: # Slow logs #slowlog: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. 
#var.paths: #------------------------------ Microsoft Module ------------------------------ - module: microsoft # ATP configuration defender_atp: enabled: true # How often the API should be polled #var.interval: 5m # Oauth Client ID #var.oauth2.client.id: "" # Oauth Client Secret #var.oauth2.client.secret: "" # Oauth Token URL, should include the tenant ID #var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/token" m365_defender: enabled: true # How often the API should be polled #var.interval: 5m # Oauth Client ID #var.oauth2.client.id: "" # Oauth Client Secret #var.oauth2.client.secret: "" # Oauth Token URL, should include the tenant ID #var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/v2.0/token" # Related scopes, default should be included #var.oauth2.scopes: # - "https://api.security.microsoft.com/.default" dhcp: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9515 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #--------------------------------- MISP Module --------------------------------- # Deprecated in 7.14.0: Recommended to migrate to the Threat Intel module. - module: misp threat: enabled: true # API key to access MISP #var.api_key # Array object in MISP response #var.http_request_body.limit: 1000 # URL of the MISP REST API #var.url # You can also pass SSL options. For example: #var.ssl.verification_mode: none #------------------------------- Mongodb Module ------------------------------- #- module: mongodb # Logs #log: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). 
Any input configuration option # can be added under this section. #input: #-------------------------------- Mssql Module -------------------------------- - module: mssql # Fileset for native deployment log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: ['C:\Program Files\Microsoft SQL Server\MSSQL.150\MSSQL\LOG\ERRORLOG*'] #-------------------------------- MySQL Module -------------------------------- #- module: mysql # Error logs #error: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: # Slow logs #slowlog: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: #--------------------------- MySQL Enterprise Module --------------------------- - module: mysqlenterprise audit: enabled: true # Sets the input type. Currently only supports file #var.input: file # Set paths for the log files when file input is used. # Should only be used together with file input # var.paths: # - /home/user/mysqlauditlogs/audit.*.log #--------------------------------- NATS Module --------------------------------- - module: nats # All logs log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. 
#var.paths: #------------------------------- NetFlow Module ------------------------------- - module: netflow log: enabled: true var: netflow_host: localhost netflow_port: 2055 # internal_networks specifies which networks are considered internal or private # you can specify either a CIDR block or any of the special named ranges listed # at: https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network internal_networks: - private #-------------------------- Arbor Peakflow SP Module -------------------------- - module: netscout sightline: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9502 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #-------------------------------- Nginx Module -------------------------------- #- module: nginx # Access logs #access: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: # Error logs #error: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: # Ingress-nginx controller logs. This is disabled by default. It could be used in Kubernetes environments to parse ingress-nginx logs #ingress_controller: # enabled: false # # # Set custom paths for the log files. If left empty, # # Filebeat will choose the paths depending on your OS. 
# #var.paths: #------------------------------ Office 365 Module ------------------------------ - module: o365 audit: enabled: true # Set the application_id (also known as client ID): var.application_id: "" # Configure the tenants to monitor: # Use the tenant ID (also known as directory ID) and the domain name. # var.tenants: # - id: "tenant_id_1" # name: "mydomain.onmicrosoft.com" # - id: "tenant_id_2" # name: "mycompany.com" var.tenants: - id: "" name: "mytenant.onmicrosoft.com" # List of content-types to fetch. By default all known content-types # are retrieved: # var.content_type: # - "Audit.AzureActiveDirectory" # - "Audit.Exchange" # - "Audit.SharePoint" # - "Audit.General" # - "DLP.All" # Use the following settings to enable certificate-based authentication: # var.certificate: "/path/to/certificate.pem" # var.key: "/path/to/private_key.pem" # var.key_passphrase: "myPrivateKeyPassword" # Client-secret based authentication: # Comment the following line if using certificate authentication. var.client_secret: "" # Advanced settings, use with care: # var.api: # # Settings for custom endpoints: # authentication_endpoint: "https://login.microsoftonline.us/" # resource: "https://manage.office365.us" # # max_retention: 168h # max_requests_per_minute: 2000 # poll_interval: 3m #--------------------------------- Okta Module --------------------------------- - module: okta system: enabled: true # You must configure the URL with your Okta domain and provide an # API token to access the logs API. #var.url: https://yourOktaDomain/api/v1/logs #var.api_key: 'yourApiTokenHere' #-------------------------------- Oracle Module -------------------------------- - module: oracle database_audit: enabled: true # Set which input to use between syslog or file (default). #var.input: file # Set paths for the log files when file input is used. 
# Should only be used together with file input #var.paths: ["/home/user/oracleauditlogs/*.aud"] #------------------------------- Osquery Module ------------------------------- - module: osquery result: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # If true, all fields created by this module are prefixed with # `osquery.result`. Set to false to copy the fields in the root # of the document. The default is true. #var.use_namespace: true #--------------------------------- Panw Module --------------------------------- - module: panw panos: enabled: true # Set which input to use between syslog (default) or file. #var.input: # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Set internal security zones. used to determine network.direction # default "trust" #var.internal_zones: # Set external security zones. used to determine network.direction # default "untrust" #var.external_zones: #------------------------------- Pensando Module ------------------------------- - module: pensando # Firewall logs dfw: enabled: true var.syslog_host: 0.0.0.0 var.syslog_port: 9001 # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. # var.paths: #------------------------------ PostgreSQL Module ------------------------------ #- module: postgresql # Logs #log: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. #input: #---------------------- Proofpoint Email Security Module ---------------------- - module: proofpoint emailsecurity: enabled: true # Set which input to use between udp (default), tcp or file. 
# var.input: udp # var.syslog_host: localhost # var.syslog_port: 9531 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #------------------------------- RabbitMQ Module ------------------------------- - module: rabbitmq # All logs log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: ["/var/log/rabbitmq/rabbit@localhost.log*"] #-------------------------- Radware DefensePro Module -------------------------- - module: radware defensepro: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9518 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #-------------------------------- Redis Module -------------------------------- #- module: redis # Main logs #log: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: ["/var/log/redis/redis-server.log*"] # Slow logs, retrieved via the Redis API (SLOWLOG) #slowlog: #enabled: true # The Redis hosts to connect to. #var.hosts: ["localhost:6379"] # Optional, the password to use when connecting to Redis. #var.password: #----------------------------- Google Santa Module ----------------------------- - module: santa log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the default path. 
#var.paths: #--------------------------- Snort/Sourcefire Module --------------------------- - module: snort log: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9532 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #--------------------------------- Snyk Module --------------------------------- - module: snyk audit: enabled: true # Set which input to use between httpjson (default) or file. #var.input: httpjson # # What audit type to collect, can be either "group" or "organization". #var.audit_type: organization # # The ID related to the audit_type. If audit type is group, then this value should be # the group ID and if it is organization it should be the organization ID to collect from. #var.audit_id: 1235432-asdfdf-2341234-asdgjhg # How often the API should be polled, defaults to 1 hour. #var.interval: 1h # How far to look back the first time the module starts up. (Only works with full days, 24 hours, 48 hours etc). #var.first_interval: 24h # The API token that is created for a specific user, found in the Snyk management dashboard. #var.api_token: # Event filtering. # All configuration items below are OPTIONAL and the default options will be overwritten # for each entry that is not commented out. # Will return only logs for this specific project. #var.project_id: "" # User public ID. Will fetch only audit logs originated from this user's actions. #var.user_id: "" # Will return only logs for this specific event. #var.event: "" # User email address. Will fetch only audit logs originated from this user's actions. #var.email_address: "" vulnerabilities: enabled: true # Set which input to use between httpjson (default) or file. 
#var.input: httpjson # How often the API should be polled. Data from the Snyk API is automatically updated # once per day, so the default interval is 24 hours. #var.interval: 24h # How far to look back the first time the module starts up. (Only works with full days, 24 hours, 48 hours etc). #var.first_interval: 24h # The API token that is created for a specific user, found in the Snyk management dashboard. #var.api_token: # The list of org IDs to filter the results by. # One organization ID per line, starting with a - sign #var.orgs: # - 12354-asdfdf-123543-asdsdfg # - 76554-jhggfd-654342-hgrfasd # Event filtering. # All configuration items below are OPTIONAL and the default options will be overwritten # for each entry that is not commented out. # The severity levels of issues to filter the results by. #var.included_severity: # - critical # - high # - medium # - low # # The exploit maturity levels of issues to filter the results by. #var.exploit_maturity: # - mature # - proof-of-concept # - no-known-exploit # - no-data # # The type of issues to filter the results by. #var.types: # - vuln # - license # - configuration # # The type of languages to filter the results by. #var.languages: # - javascript # - ruby # - java # - scala # - python # - golang # - php # - dotnet # - swift-objective-c # - elixir # - docker # - terraform # - kubernetes # - helm # - cloudformation # # Search term to filter issue name by, or an exact CVE or CWE. #var.identifier: # - "" # # If set to true, only include issues which are ignored, if set to false, only include issues which are not ignored. 
#var.ignored: false #var.patched: false #var.fixable: false #var.is_fixed: false #var.is_patchable: false #var.is_pinnable: false # # The priority score ranging between 0-1000 #var.min_priority_score: 0 #var.max_priority_score: 1000 #----------------------------- Sonicwall-FW Module ----------------------------- - module: sonicwall firewall: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9519 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #-------------------------------- Sophos Module -------------------------------- - module: sophos xg: enabled: true # Set which input to use between tcp, udp (default) or file. #var.input: udp # The interface to listen to syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost # The port to listen for syslog traffic. Defaults to 9004. #var.syslog_port: 9005 # firewall default hostname #var.default_host_name: firewall.localgroup.local # known firewalls #var.known_devices: #- serial_number: "1234567890123457" # hostname: "a.host.local" #- serial_number: "1234234590678557" # hostname: "b.host.local" utm: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9533 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. 
# "+02:00" for GMT+02:00 # var.tz_offset: local #-------------------------------- Squid Module -------------------------------- - module: squid log: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9520 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #------------------------------- Suricata Module ------------------------------- - module: suricata # All logs eve: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #----------------------------- Threatintel Module ----------------------------- - module: threatintel abuseurl: enabled: true # Input used for ingesting threat intel data. var.input: httpjson # The URL used for Threat Intel API calls. var.url: https://urlhaus-api.abuse.ch/v1/urls/recent/ # The interval to poll the API for updates. var.interval: 10m abusemalware: enabled: true # Input used for ingesting threat intel data. var.input: httpjson # The URL used for Threat Intel API calls. var.url: https://urlhaus-api.abuse.ch/v1/payloads/recent/ # The interval to poll the API for updates. var.interval: 10m malwarebazaar: enabled: true # Input used for ingesting threat intel data. var.input: httpjson # The URL used for Threat Intel API calls. var.url: https://mb-api.abuse.ch/api/v1/ # The interval to poll the API for updates. var.interval: 10m misp: enabled: true # Input used for ingesting threat intel data, defaults to JSON. var.input: httpjson # The URL of the MISP instance, should end with "/events/restSearch". var.url: https://SERVER/events/restSearch # The authentication token used to contact the MISP API. Found when looking at user account in the MISP UI. 
var.api_token: API_KEY # Configures the type of SSL verification done, if MISP is running on self signed certificates # then the certificate would either need to be trusted, or verification_mode set to none. #var.ssl.verification_mode: none # Optional filters that can be applied to the API for filtering out results. This should support the majority of fields in a MISP context. # For examples please reference the filebeat module documentation. #var.filters: # - threat_level: [4, 5] # - to_ids: true # How far back to look once the beat starts up for the first time, the value has to be in hours. Each request afterwards will filter on any event newer # than the last event that was already ingested. var.first_interval: 300h # The interval to poll the API for updates. var.interval: 5m otx: enabled: true # Input used for ingesting threat intel data var.input: httpjson # The URL used for OTX Threat Intel API calls. var.url: https://otx.alienvault.com/api/v1/indicators/export # The authentication token used to contact the OTX API, can be found on the OTX UI. var.api_token: API_KEY # Optional filters that can be applied to retrieve only specific indicators. #var.types: "domain,IPv4,hostname,url,FileHash-SHA256" # The timeout of the HTTP client connecting to the OTX API #var.http_client_timeout: 120s # How many hours to look back for each request, should be close to the configured interval. Deduplication of events is handled by the module. var.lookback_range: 1h # How far back to look once the beat starts up for the first time, the value has to be in hours. var.first_interval: 400h # The interval to poll the API for updates var.interval: 5m anomali: enabled: true # Input used for ingesting threat intel data var.input: httpjson # The URL used for Threat Intel API calls. Limo has multiple different possibilities for URL's depending # on the type of threat intel source that is needed. 
var.url: https://limo.anomali.com/api/v1/taxii2/feeds/collections/41/objects # The Username used by anomali Limo, defaults to guest. #var.username: guest # The password used by anomali Limo, defaults to guest. #var.password: guest # How far back to look once the beat starts up for the first time, the value has to be in hours. var.first_interval: 400h # The interval to poll the API for updates var.interval: 5m anomalithreatstream: enabled: true # Input used for ingesting threat intel data var.input: http_endpoint # Address to bind to in order to receive HTTP requests # from the Integrator SDK. Use 0.0.0.0 to bind to all # existing interfaces. var.listen_address: localhost # Port to use to receive HTTP requests from the # Integrator SDK. var.listen_port: 8080 # Secret key to authenticate requests from the SDK. var.secret: '' # Uncomment the following and set the absolute paths # to the server SSL certificate and private key to # enable HTTPS secure connections. # # var.ssl_certificate: path/to/server_ssl_cert.pem # var.ssl_key: path/to/ssl_key.pem recordedfuture: enabled: true # Input used for ingesting threat intel data var.input: httpjson # The interval to poll the API for updates var.interval: 5m # How far back in time to start fetching intelligence when run for the # first time. Value must be in hours. Default: 168h (1 week). var.first_interval: 168h # The URL used for Threat Intel API calls. # Must include the `limit` parameter and at least `entity` and `timestamps` fields. # See the Connect API Explorer for a list of possible parameters. 
# # For `ip` entities: var.url: "https://api.recordedfuture.com/v2/ip/search?limit=200&fields=entity,timestamps,risk,intelCard,location&metadata=false" # For `domain` entities: # var.url: "https://api.recordedfuture.com/v2/domain/search?limit=200&fields=entity,timestamps,risk,intelCard,location&metadata=false" # For `hash` entities: # var.url: "https://api.recordedfuture.com/v2/hash/search?limit=200&fields=entity,fileHashes,timestamps,risk,intelCard,location&metadata=false" # For `url` entities: # var.url: "https://api.recordedfuture.com/v2/url/search?limit=200&fields=entity,timestamps,risk&metadata=false" # Set your API Token. var.api_token: "" #---------------------------- Apache Tomcat Module ---------------------------- - module: tomcat log: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9501 # Set paths for the log files when file input is used. # var.paths: # - /var/log/tomcat/*.log # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #------------------------------- Traefik Module ------------------------------- #- module: traefik # Access logs #access: #enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # Input configuration (advanced). Any input configuration option # can be added under this section. 
#input: #--------------------------------- Zeek Module --------------------------------- - module: zeek capture_loss: enabled: true connection: enabled: true dce_rpc: enabled: true dhcp: enabled: true dnp3: enabled: true dns: enabled: true dpd: enabled: true files: enabled: true ftp: enabled: true http: enabled: true intel: enabled: true irc: enabled: true kerberos: enabled: true modbus: enabled: true mysql: enabled: true notice: enabled: true ntp: enabled: true ntlm: enabled: true ocsp: enabled: true pe: enabled: true radius: enabled: true rdp: enabled: true rfb: enabled: true signature: enabled: true sip: enabled: true smb_cmd: enabled: true smb_files: enabled: true smb_mapping: enabled: true smtp: enabled: true snmp: enabled: true socks: enabled: true ssh: enabled: true ssl: enabled: true stats: enabled: true syslog: enabled: true traceroute: enabled: true tunnel: enabled: true weird: enabled: true x509: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #------------------------------ ZooKeeper Module ------------------------------ - module: zookeeper # All logs audit: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: # All logs log: enabled: true # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: #--------------------------------- Zoom Module --------------------------------- - module: zoom webhook: enabled: true # The type of input to use #var.input: http_endpoint # The interface to listen for incoming HTTP requests. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. 
#var.listen_address: localhost # The port to bind to #var.listen_port: 80 # The header Zoom uses to send its secret token, defaults to "Authorization" #secret.header: Authorization # The secret token value created by Zoom #secret.value: ZOOMTOKEN #----------------------------- Zscaler NSS Module ----------------------------- - module: zscaler zia: enabled: true # Set which input to use between udp (default), tcp or file. # var.input: udp # var.syslog_host: localhost # var.syslog_port: 9521 # Set paths for the log files when file input is used. # var.paths: # Toggle output of non-ECS fields (default true). # var.rsa_fields: true # Set custom timezone offset. # "local" (default) for system timezone. # "+02:00" for GMT+02:00 # var.tz_offset: local #=========================== Filebeat inputs ============================= # List of inputs to fetch data. filebeat.inputs: # Each - is an input. Most options can be set at the input level, so # you can use different inputs for various configurations. # Below are the input specific configurations. # Type of the files. Based on this the way the file is read is decided. # The different types cannot be mixed in one input # # Possible options are: # * log: Reads every line of the log file (default) # * filestream: Improved version of log input # * stdin: Reads the standard in #------------------------------ Log input -------------------------------- - type: log # Change to true to enable this input configuration. enabled: false # Paths that should be crawled and fetched. Glob based paths. # To fetch all ".log" files from a specific level of subdirectories # /var/log/*/*.log can be used. # For each file found under this path, a harvester is started. # Make sure not file is defined twice as this can lead to unexpected behaviour. 
paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* # Configure the file encoding for reading files with international characters # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). # Some sample encodings: # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... #encoding: plain # Exclude lines. A list of regular expressions to match. It drops the lines that are # matching any regular expression from the list. The include_lines is called before # exclude_lines. By default, no lines are dropped. #exclude_lines: ['^DBG'] # Include lines. A list of regular expressions to match. It exports the lines that are # matching any regular expression from the list. The include_lines is called before # exclude_lines. By default, all the lines are exported. #include_lines: ['^ERR', '^WARN'] # Exclude files. A list of regular expressions to match. Filebeat drops the files that # are matching any regular expression from the list. By default, no files are dropped. #exclude_files: ['.gz$'] # Method to determine if two files are the same or not. By default # the Beat considers two files the same if their inode and device id are the same. #file_identity.native: ~ # Optional additional fields. These fields can be freely picked # to add additional information to the crawled log files for filtering #fields: # level: debug # review: 1 # Set to true to store the additional fields as top level fields instead # of under the "fields" sub-dictionary. In case of name conflicts with the # fields added by Filebeat itself, the custom fields overwrite the default # fields. #fields_under_root: false # Set to true to publish fields with null values in events. #keep_null: false # By default, all events contain `host.name`. This option can be set to true # to disable the addition of this field to all events. The default value is # false. 
#publisher_pipeline.disable_host: false # Ignore files which were modified more then the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 # How often the input checks for new files in the paths that are specified # for harvesting. Specify 1s to scan the directory as frequently as possible # without causing Filebeat to scan too frequently. Default: 10s. #scan_frequency: 10s # Defines the buffer size every harvester uses when fetching the file #harvester_buffer_size: 16384 # Maximum number of bytes a single log event can have # All bytes after max_bytes are discarded and not sent. The default is 10MB. # This is especially useful for multiline log messages which can get large. #max_bytes: 10485760 # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator. #line_terminator: auto ### Recursive glob configuration # Expand "**" patterns into regular glob patterns. #recursive_glob.enabled: true ### JSON configuration # Decode JSON options. Enable this if your logs are structured in JSON. # JSON key on which to apply the line filtering and multiline settings. This key # must be top level and its value must be string, otherwise it is ignored. If # no text key is defined, the line filtering and multiline features cannot be used. #json.message_key: # By default, the decoded JSON is placed under a "json" key in the output document. # If you enable this setting, the keys are copied top level in the output document. #json.keys_under_root: false # If keys_under_root and this setting are enabled, then the values from the decoded # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) # in case of conflicts. 
#json.overwrite_keys: false # If this setting is enabled, then keys in the decoded JSON object will be recursively # de-dotted, and expanded into a hierarchical object structure. # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. #json.expand_keys: false # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON # unmarshaling errors or when a text key is defined in the configuration but cannot # be used. #json.add_error_key: false ### Multiline options # Multiline can be used for log messages spanning multiple lines. This is common # for Java Stack Traces or C-Line Continuation # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ #multiline.pattern: ^\[ # Defines if the pattern set under pattern should be negated or not. Default is false. #multiline.negate: false # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern # that was (not) matched before or after or as long as a pattern is not matched based on negate. # Note: After is the equivalent to previous and before is the equivalent to next in Logstash #multiline.match: after # The maximum number of lines that are combined to one event. # In case there are more than max_lines the additional lines are discarded. # Default is 500 #multiline.max_lines: 500 # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event # Default is 5s. #multiline.timeout: 5s # To aggregate constant number of lines into a single event use the count mode of multiline. #multiline.type: count # The number of lines to aggregate into a single event. #multiline.count_lines: 3 # Do not add new line character when concatenating lines. #multiline.skip_newline: false # Setting tail_files to true means filebeat starts reading new files at the end # instead of the beginning.
If this is used in combination with log rotation # this can mean that the first entries of a new file are skipped. #tail_files: false # The Ingest Node pipeline ID associated with this input. If this is set, it # overwrites the pipeline option from the Elasticsearch output. #pipeline: # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the # original for harvesting but will report the symlink name as source. #symlinks: false # Backoff values define how aggressively filebeat crawls new files for updates # The default values can be used in most cases. Backoff defines how long it is waited # to check a file again after EOF is reached. Default is 1s which means the file # is checked every second if new lines were added. This leads to a near real time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff: 1s # Max backoff defines what the maximum backoff time is. After having backed off multiple times # from checking the files, the waiting time will never exceed max_backoff independent of the # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log # file after having backed off multiple times, it takes a maximum of 10s to read the new line #max_backoff: 10s # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached #backoff_factor: 2 # Max number of harvesters that are started in parallel. # Default is 0 which means unlimited #harvester_limit: 0 ### Harvester closing options # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. 
#close_inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_renamed: false # When enabling this option, a file handler is closed immediately in case a file can't be found # any more. In case the file shows up again later, harvesting will continue at the last known position # after scan_frequency. #close_removed: true # Closes the file handler as soon as the harvester reaches the end of the file. # By default this option is disabled. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_eof: false ### State options # If a file's modification time is older than clean_inactive the state from the registry is removed # By default this is disabled. #clean_inactive: 0 # Removes the state for files which cannot be found on disk anymore immediately #clean_removed: true # Close timeout closes the harvester after the predefined time. # This is independent of whether the harvester did finish reading the file or not. # By default this option is disabled. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_timeout: 0 # Defines if the input is enabled #enabled: true #--------------------------- Filestream input ---------------------------- - type: filestream # Change to true to enable this input configuration. enabled: false # Paths that should be crawled and fetched. Glob based paths. # To fetch all ".log" files from a specific level of subdirectories # /var/log/*/*.log can be used. # For each file found under this path, a harvester is started. # Make sure no file is defined twice as this can lead to unexpected behaviour. paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* # Configure the file encoding for reading files with international characters # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
# Some sample encodings: # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... #encoding: plain # Exclude lines. A list of regular expressions to match. It drops the lines that are # matching any regular expression from the list. The include_lines is called before # exclude_lines. By default, no lines are dropped. #exclude_lines: ['^DBG'] # Include lines. A list of regular expressions to match. It exports the lines that are # matching any regular expression from the list. The include_lines is called before # exclude_lines. By default, all the lines are exported. #include_lines: ['^ERR', '^WARN'] ### Prospector options # How often the input checks for new files in the paths that are specified # for harvesting. Specify 1s to scan the directory as frequently as possible # without causing Filebeat to scan too frequently. Default: 10s. #prospector.scanner.check_interval: 10s # Exclude files. A list of regular expressions to match. Filebeat drops the files that # are matching any regular expression from the list. By default, no files are dropped. #prospector.scanner.exclude_files: ['.gz$'] # Include files. A list of regular expressions to match. Filebeat keeps only the files that # are matching any regular expression from the list. By default, no files are dropped. #prospector.scanner.include_files: ['/var/log/.*'] # Expand "**" patterns into regular glob patterns. #prospector.scanner.recursive_glob: true # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the # original for harvesting but will report the symlink name as source. #prospector.scanner.symlinks: false ### Log rotation # When an external tool rotates the input files with copytruncate strategy # use this section to help the input find the rotated files. #rotation.external.strategy.copytruncate: # Regex that matches the rotated files. 
# suffix_regex: \.\d$ # If the rotated filename suffix is a datetime, set it here. # dateformat: -20060102 ### State options # Files for the modification data is older then clean_inactive the state from the registry is removed # By default this is disabled. #clean_inactive: 0 # Removes the state for file which cannot be found on disk anymore immediately #clean_removed: true # Method to determine if two files are the same or not. By default # the Beat considers two files the same if their inode and device id are the same. #file_identity.native: ~ # Optional additional fields. These fields can be freely picked # to add additional information to the crawled log files for filtering #fields: # level: debug # review: 1 # Set to true to publish fields with null values in events. #keep_null: false # By default, all events contain `host.name`. This option can be set to true # to disable the addition of this field to all events. The default value is # false. #publisher_pipeline.disable_host: false # Ignore files which were modified more then the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 # Ignore files that have not been updated since the selected event. # ignore_inactive is disabled by default, so no files are ignored by setting it to "". # Available options: since_first_start, since_last_start. #ignore_inactive: "" # Defines the buffer size every harvester uses when fetching the file #harvester_buffer_size: 16384 # Maximum number of bytes a single log event can have # All bytes after max_bytes are discarded and not sent. The default is 10MB. # This is especially useful for multiline log messages which can get large. #message_max_bytes: 10485760 # Characters which separate the lines. 
Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator. #line_terminator: auto # The Ingest Node pipeline ID associated with this input. If this is set, it # overwrites the pipeline option from the Elasticsearch output. #pipeline: # Backoff values define how aggressively filebeat crawls new files for updates # The default values can be used in most cases. Backoff defines how long it is waited # to check a file again after EOF is reached. Default is 1s which means the file # is checked every second if new lines were added. This leads to a near real time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff.init: 1s # Max backoff defines what the maximum backoff time is. After having backed off multiple times # from checking the files, the waiting time will never exceed max_backoff independent of the # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log # file after having backed off multiple times, it takes a maximum of 10s to read the new line #backoff.max: 10s ### Harvester closing options # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #close.on_state_change.inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close.on_state_change.renamed: false # When enabling this option, a file handler is closed immediately in case a file can't be found # any more. In case the file shows up again later, harvesting will continue at the last known position # after scan_frequency. #close.on_state_change.removed: true # Closes the file handler as soon as the harvesters reaches the end of the file. 
# By default this option is disabled. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close.reader.eof: false # Close timeout closes the harvester after the predefined time. # This is independent if the harvester did finish reading the file or not. # By default this option is disabled. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close.reader.after_interval: 0 #----------------------------- Stdin input ------------------------------- # Configuration to use stdin input #- type: stdin #------------------------- Redis slowlog input --------------------------- # Experimental: Config options for the redis slow log input #- type: redis #enabled: false # List of hosts to pool to retrieve the slow log information. #hosts: ["localhost:6379"] # How often the input checks for redis slow log. #scan_frequency: 10s # Timeout after which time the input should return an error #timeout: 1s # Network type to be used for redis connection. Default: tcp #network: tcp # Max number of concurrent connections. Default: 10 #maxconn: 10 # Redis AUTH password. Empty by default. #password: foobared #------------------------------ Udp input -------------------------------- # Experimental: Config options for the udp input #- type: udp #enabled: false # Maximum size of the message received over UDP #max_message_size: 10KiB # Size of the UDP read buffer in bytes #read_buffer: 0 #------------------------------ TCP input -------------------------------- # Experimental: Config options for the TCP input #- type: tcp #enabled: false # The host and port to receive the new event #host: "localhost:9000" # Character used to split new message #line_delimiter: "\n" # Maximum size in bytes of the message received over TCP #max_message_size: 20MiB # Max number of concurrent connections, or 0 for no limit. Default: 0 #max_connections: 0 # The number of seconds of inactivity before a remote connection is closed. 
#timeout: 300s # Use SSL settings for TCP. #ssl.enabled: true # List of supported/valid TLS versions. By default all TLS versions 1.0 up to # 1.2 are enabled. #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # SSL configuration. By default is off. # List of root certificates for client verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL server authentication. #ssl.certificate: "/etc/pki/client/cert.pem" # Server Certificate Key, #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the Certificate Key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections. #ssl.cipher_suites: [] # Configure curve types for ECDHE based cipher suites. #ssl.curve_types: [] # Configure what types of client authentication are supported. Valid options # are `none`, `optional`, and `required`. When `certificate_authorities` is set it will # default to `required` otherwise it will be set to `none`. #ssl.client_authentication: "required" #------------------------------ Syslog input -------------------------------- # Accept RFC3164 formatted syslog event via UDP. #- type: syslog #enabled: false #format: rfc3164 #protocol.udp: # The host and port to receive the new event #host: "localhost:9000" # Maximum size of the message received over UDP #max_message_size: 10KiB # Accept RFC5424 formatted syslog event via TCP. #- type: syslog #enabled: false #format: rfc5424 #protocol.tcp: # The host and port to receive the new event #host: "localhost:9000" # Character used to split new message #line_delimiter: "\n" # Maximum size in bytes of the message received over TCP #max_message_size: 20MiB # The number of seconds of inactivity before a remote connection is closed. #timeout: 300s # Use SSL settings for TCP. #ssl.enabled: true # List of supported/valid TLS versions. By default all TLS versions 1.0 up to # 1.2 are enabled. #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # SSL configuration. 
By default is off. # List of root certificates for client verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL server authentication. #ssl.certificate: "/etc/pki/client/cert.pem" # Server Certificate Key, #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the Certificate Key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections. #ssl.cipher_suites: [] # Configure curve types for ECDHE based cipher suites. #ssl.curve_types: [] # Configure what types of client authentication are supported. Valid options # are `none`, `optional`, and `required`. When `certificate_authorities` is set it will # default to `required` otherwise it will be set to `none`. #ssl.client_authentication: "required" #------------------------------ Container input -------------------------------- #- type: container #enabled: false # Paths for container logs that should be crawled and fetched. #paths: # -/var/lib/docker/containers/*/*.log # Configure stream to filter to a specific stream: stdout, stderr or all (default) #stream: all #------------------------------ NetFlow input -------------------------------- # Experimental: Config options for the Netflow/IPFIX collector over UDP input #- type: netflow #enabled: false # Address where the NetFlow Collector will bind #host: ":2055" # Maximum size of the message received over UDP #max_message_size: 10KiB # List of enabled protocols. # Valid values are 'v1', 'v5', 'v6', 'v7', 'v8', 'v9' and 'ipfix' #protocols: [ v5, v9, ipfix ] # Expiration timeout # This is the time before an idle session or unused template is expired. # Only applicable to v9 and ipfix protocols. A value of zero disables expiration. #expiration_timeout: 30m # Queue size limits the number of netflow packets that are queued awaiting # processing. #queue_size: 8192 # Custom field definitions for NetFlow V9 / IPFIX. # List of files with YAML fields definition. 
#custom_definitions: #- path/to/ipfix.yaml #- path/to/netflow.yaml #---------------------------- Google Cloud Pub/Sub Input ----------------------- # Input for reading messages from a Google Cloud Pub/Sub topic subscription. - type: gcp-pubsub enabled: false # Google Cloud project ID. Required. project_id: my-gcp-project-id # Google Cloud Pub/Sub topic name. Required. topic: my-gcp-pubsub-topic-name # Google Cloud Pub/Sub topic subscription name. Required. subscription.name: my-gcp-pubsub-subscription-name # Create subscription if it does not exist. #subscription.create: true # Number of goroutines to create to read from the subscription. #subscription.num_goroutines: 1 # Maximum number of unprocessed messages to allow at any time. #subscription.max_outstanding_messages: 1000 # Path to a JSON file containing the credentials and key used to subscribe. credentials_file: ${path.config}/my-pubsub-subscriber-credentials.json #------------------------------ AWS S3 input -------------------------------- # Beta: Config options for AWS S3 input #- type: aws-s3 #enabled: false # AWS Credentials # If access_key_id and secret_access_key are configured, then use them to make api calls. # If not, aws-s3 input will load default AWS config or load with given profile name. #access_key_id: '${AWS_ACCESS_KEY_ID:""}' #secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' #session_token: '${AWS_SESSION_TOKEN:""}' #credential_profile_name: test-aws-s3-input # SQS queue URL to receive messages from (required). #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue" # Maximum number of SQS messages that can be inflight at any time. #max_number_of_messages: 5 # Maximum duration of an AWS API call (excluding S3 GetObject calls). #api_timeout: 120s # Duration that received SQS messages are hidden from subsequent # requests after being retrieved by a ReceiveMessage request. #visibility_timeout: 300s # List of S3 object metadata keys to include in events.
#include_s3_metadata: [] # The max number of times an SQS message should be received (retried) before deleting it. #sqs.max_receive_count: 5 # Maximum duration for which the SQS ReceiveMessage call waits for a message # to arrive in the queue before returning. #sqs.wait_time: 20s #------------------------------ AWS CloudWatch input -------------------------------- # Beta: Config options for AWS CloudWatch input #- type: aws-cloudwatch #enabled: false # AWS Credentials # If access_key_id and secret_access_key are configured, then use them to make api calls. # If not, aws-cloudwatch input will load default AWS config or load with given profile name. #access_key_id: '${AWS_ACCESS_KEY_ID:""}' #secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' #session_token: '${AWS_SESSION_TOKEN:""}' #credential_profile_name: test-aws-s3-input # ARN of the log group to collect logs from #log_group_arn: "arn:aws:logs:us-east-1:428152502467:log-group:test:*" # Name of the log group to collect logs from. # Note: region_name is required when log_group_name is given. #log_group_name: test # The prefix for a group of log group names. # Note: `region_name` is required when `log_group_name_prefix` is given. # `log_group_name` and `log_group_name_prefix` cannot be given at the same time. #log_group_name_prefix: /aws/ # Region that the specified log group or log group prefix belongs to. #region_name: us-east-1 # A list of log stream names that Filebeat collects log events from. #log_streams: # - log_stream_name # A string to filter the results to include only log events from log streams # that have names starting with this prefix. #log_stream_prefix: test # `start_position` allows user to specify if this input should read log files # from the `beginning` or from the `end`. # `beginning`: reads from the beginning of the log group (default). # `end`: read only new messages from current time minus `scan_frequency` going forward.
#start_position: beginning # This config parameter sets how often Filebeat checks for new log events from the # specified log group. Default `scan_frequency` is 1 minute, which means Filebeat # will sleep for 1 minute before querying for new logs again. #scan_frequency: 1m # The maximum duration of AWS API can take. If it exceeds the timeout, AWS API # will be interrupted. # The default AWS API timeout for a message is 120 seconds. # The minimum is 0 seconds. #api_timeout: 120s # This is used to sleep between AWS `FilterLogEvents` API calls inside the same # collection period. #api_sleep: 200ms # =========================== Filebeat autodiscover ============================ # Autodiscover allows you to detect changes in the system and spawn new modules # or inputs as they happen. #filebeat.autodiscover: # List of enabled autodiscover providers # providers: # - type: docker # templates: # - condition: # equals.docker.container.image: busybox # config: # - type: container # paths: # - /var/lib/docker/containers/${data.docker.container.id}/*.log # ========================== Filebeat global options =========================== # Registry data path. If a relative path is used, it is considered relative to the # data path. #filebeat.registry.path: ${path.data}/registry # The permissions mask to apply on registry data, and meta files. The default # value is 0600. Must be a valid Unix-style file permissions mask expressed in # octal notation. This option is not supported on Windows. #filebeat.registry.file_permissions: 0600 # The timeout value that controls when registry entries are written to disk # (flushed). When an unwritten update exceeds this value, it triggers a write # to disk. When flush is set to 0s, the registry is written to disk after each # batch of events has been published successfully. The default value is 0s. #filebeat.registry.flush: 0s # Starting with Filebeat 7.0, the registry uses a new directory format to store # Filebeat state. 
After you upgrade, Filebeat will automatically migrate a 6.x # registry file to use the new directory format. If you changed # filebeat.registry.path while upgrading, set filebeat.registry.migrate_file to # point to the old registry file. #filebeat.registry.migrate_file: ${path.data}/registry # By default Ingest pipelines are not updated if a pipeline with the same ID # already exists. If this option is enabled Filebeat overwrites pipelines # everytime a new Elasticsearch connection is established. #filebeat.overwrite_pipelines: false # How long filebeat waits on shutdown for the publisher to finish. # Default is 0, not waiting. #filebeat.shutdown_timeout: 0 # Enable filebeat config reloading #filebeat.config: #inputs: #enabled: false #path: inputs.d/*.yml #reload.enabled: true #reload.period: 10s #modules: #enabled: false #path: modules.d/*.yml #reload.enabled: true #reload.period: 10s # ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. # If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the # output. Fields can be scalar values, arrays, dictionaries, or any nested # combination of these. #fields: # env: staging # If this option is set to true, the custom fields are stored as top-level # fields in the output document instead of being grouped under a fields # sub-dictionary. Default is false. #fields_under_root: false # Internal queue configuration for buffering events to be published. 
#queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. #events: 4096 # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. # The default value is set to 2048. # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, # if the number of events stored in the queue is < `flush.min_events`. #flush.timeout: 1s # The disk queue stores incoming events on disk until the output is # ready for them. This allows a higher event limit than the memory-only # queue and lets pending events persist through a restart. #disk: # The directory path to store the queue's data. #path: "${path.data}/diskqueue" # The maximum space the queue should occupy on disk. Depending on # input settings, events that exceed this limit are delayed or discarded. #max_size: 10GB # The maximum size of a single queue data file. Data in the queue is # stored in smaller segments that are deleted after all their events # have been processed. #segment_size: 1GB # The number of events to read from disk to memory while waiting for # the output to request them. #read_ahead: 512 # The number of events to accept from inputs while waiting for them # to be written to disk. If event data arrives faster than it # can be written to disk, this setting prevents it from overflowing # main memory. #write_ahead: 2048 # The duration to wait before retrying when the queue encounters a disk # write error. #retry_interval: 1s # The maximum length of time to wait before retrying on a disk write # error. If the queue encounters repeated errors, it will double the # length of its retry interval each time, up to this maximum.
#max_retry_interval: 30s # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # Note: the spool queue is deprecated and will be removed in the future. # Use the disk queue instead. # # The spool file is a circular buffer, which blocks once the file/buffer is full. # Events are put into a write buffer and flushed once the write buffer # is full or the flush_timeout is triggered. # Once ACKed by the output, events are removed immediately from the queue, # making space for new events to be persisted. #spool: # The file namespace configures the file path and the file creation settings. # Once the file exists, the `size`, `page_size` and `prealloc` settings # will have no more effect. #file: # Location of spool file. The default value is ${path.data}/spool.dat. #path: "${path.data}/spool.dat" # Configure file permissions if file is created. The default value is 0600. #permissions: 0600 # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. #size: 100MiB # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. #page_size: 4KiB # If prealloc is set, the required space for the file is reserved using # truncate. The default value is true. #prealloc: true # Spool writer settings # Events are serialized into a write buffer. The write buffer is flushed if: # - The buffer limit has been reached. # - The configured limit of buffered events is reached. # - The flush timeout is triggered. #write: # Sets the write buffer size. #buffer_size: 1MiB # Maximum duration after which events are flushed if the write buffer # is not full yet. The default value is 1s. #flush.timeout: 1s # Number of maximum buffered events. The write buffer is flushed once the # limit is reached. #flush.events: 16384 # Configure the on-disk event encoding. The encoding can be changed # between restarts. # Valid encodings are: json, ubjson, and cbor. 
#codec: cbor #read: # Reader flush timeout, waiting for more events to become available, so # to fill a complete batch as required by the outputs. # If flush_timeout is 0, all available events are forwarded to the # outputs immediately. # The default value is 0s. #flush.timeout: 0s # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: # ================================= Processors ================================= # Processors are used to reduce the number of fields in the exported event or to # enhance the event with external metadata. This section defines a list of # processors that are applied one by one and the first one receives the initial # event: # # event -> filter1 -> event1 -> filter2 ->event2 ... # # The supported processors are drop_fields, drop_event, include_fields, # decode_json_fields, and add_cloud_metadata. # # For example, you can use the following processors to keep the fields that # contain CPU load percentages, but remove the fields that contain CPU ticks # values: # #processors: # - include_fields: # fields: ["cpu"] # - drop_fields: # fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: # - drop_event: # when: # equals: # http.code: 200 # # The following example renames the field a to b: # #processors: # - rename: # fields: # - from: "a" # to: "b" # # The following example tokenizes the string into fields: # #processors: # - dissect: # tokenizer: "%{key1} - %{key2}" # field: "message" # target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: # - add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. 
# #processors: # - add_locale: # format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: # - add_docker_metadata: # host: "unix:///var/run/docker.sock" # match_fields: ["system.process.cgroup.id"] # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 # match_short_id: false # cleanup_timeout: 60 # labels.dedot: false # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: # # certificate_authority: "/etc/pki/root/ca.pem" # # certificate: "/etc/pki/client/cert.pem" # # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). # #processors: # - add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: # - add_host_metadata: ~ # # The following example enriches each event with process metadata using # process IDs included in the event. # #processors: # - add_process_metadata: # match_pids: ["system.process.ppid"] # target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: # - decode_json_fields: # fields: ["field1", "field2", ...] 
# process_array: false # max_depth: 1 # target: "" # overwrite_keys: false # #processors: # - decompress_gzip_field: # from: "field1" # to: "field2" # ignore_missing: false # fail_on_error: true # # The following example copies the value of message to message_copied # #processors: # - copy_fields: # fields: # - from: message # to: message_copied # fail_on_error: true # ignore_missing: false # # The following example truncates the value of message to 1024 bytes # #processors: # - truncate_fields: # fields: # - message # max_bytes: 1024 # fail_on_error: false # ignore_missing: true # # The following example preserves the raw message under event.original # #processors: # - copy_fields: # fields: # - from: message # to: event.original # fail_on_error: false # ignore_missing: true # - truncate_fields: # fields: # - event.original # max_bytes: 1024 # fail_on_error: false # ignore_missing: true # # The following example URL-decodes the value of field1 to field2 # #processors: # - urldecode: # fields: # - from: "field1" # to: "field2" # ignore_missing: false # fail_on_error: true # =============================== Elastic Cloud ================================ # These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. # You can find the `cloud.id` in the Elastic Cloud web UI. #cloud.id: # The cloud.auth setting overwrites the `output.elasticsearch.username` and # `output.elasticsearch.password` settings. The format is `<user>:<pass>`. #cloud.auth: # ================================== Outputs =================================== # Configure what output to use when sending the data collected by the beat. # ---------------------------- Elasticsearch Output ---------------------------- output.elasticsearch: # Boolean flag to enable or disable the output module. #enabled: true # Array of hosts to connect to. 
# Scheme and port can be left out and will be set to the default (http and 9200) # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] # Set gzip compression level. #compression_level: 0 # Configure escaping HTML symbols in strings. #escape_html: false # Protocol - either `http` (default) or `https`. #protocol: "https" # Authentication credentials - either API key or username/password. #api_key: "id:api_key" #username: "elastic" #password: "changeme" # Dictionary of HTTP parameters to pass within the URL with index operations. #parameters: #param1: value1 #param2: value2 # Number of workers per Elasticsearch host. #worker: 1 # Optional index name. The default is "filebeat" plus date # and generates [filebeat-]YYYY.MM.DD keys. # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "filebeat-%{[agent.version]}-%{+yyyy.MM.dd}" # Optional ingest node pipeline. By default no pipeline will be used. #pipeline: "" # Optional HTTP path #path: "/elasticsearch" # Custom HTTP headers to add to each request #headers: # X-My-Header: Contents of the header # Proxy server URL #proxy_url: http://proxy:3128 # Whether to disable proxy settings for outgoing connections. If true, this # takes precedence over both the proxy_url field and any environment settings # (HTTP_PROXY, HTTPS_PROXY). The default is false. #proxy_disable: false # The number of times a particular Elasticsearch index operation is attempted. If # the indexing operation doesn't succeed after this many retries, the events are # dropped. The default is 3. #max_retries: 3 # The maximum number of events to bulk in a single Elasticsearch bulk API index request. # The default is 50. #bulk_max_size: 50 # The number of seconds to wait before trying to reconnect to Elasticsearch # after a network error. 
After waiting backoff.init seconds, the Beat # tries to reconnect. If the attempt fails, the backoff timer is increased # exponentially up to backoff.max. After a successful connection, the backoff # timer is reset. The default is 1s. #backoff.init: 1s # The maximum number of seconds to wait before attempting to connect to # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. #ssl.enabled: true # Controls the verification of certificates. Valid values are: # * full, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. # * strict, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. If the Subject Alternative # Name is empty, it returns an error. # * certificate, which verifies that the provided certificate is signed by a # trusted authority (CA), but does not perform any hostname verification. # * none, which performs no verification of the server's certificate. This # mode disables many of the security benefits of SSL/TLS and should only be used # after very careful consideration. It is primarily intended as a temporary # diagnostic mechanism when attempting to resolve TLS errors; its use in # production environments is strongly discouraged. # The default value is full. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.1 # up to 1.3 are enabled. 
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client certificate key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never # Configure a pin that can be used to do extra validation of the verified certificate chain, # this allows you to ensure that a specific certificate is used to validate the chain of trust. # # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. #kerberos.enabled: true # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password # Path to the keytab file. It is used when auth_type is set to keytab. #kerberos.keytab: /etc/elastic.keytab # Path to the Kerberos configuration. #kerberos.config_path: /etc/krb5.conf # Name of the Kerberos user. #kerberos.username: elastic # Password of the Kerberos user. It is used when auth_type is set to password. #kerberos.password: changeme # Kerberos realm. #kerberos.realm: ELASTIC # ------------------------------ Logstash Output ------------------------------- #output.logstash: # Boolean flag to enable or disable the output module. #enabled: true # The Logstash hosts #hosts: ["localhost:5044"] # Number of workers per Logstash host. #worker: 1 # Set gzip compression level. #compression_level: 3 # Configure escaping HTML symbols in strings. 
#escape_html: false # Optional maximum time to live for a connection to Logstash, after which the # connection will be re-established. A value of `0s` (the default) will # disable this feature. # # Not yet supported for async connections (i.e. with the "pipelining" option set) #ttl: 30s # Optionally load-balance events between Logstash hosts. Default is false. #loadbalance: false # Number of batches to be sent asynchronously to Logstash while processing # new batches. #pipelining: 2 # If enabled only a subset of events in a batch of events is transferred per # transaction. The number of events to be sent increases up to `bulk_max_size` # if no error is encountered. #slow_start: false # The number of seconds to wait before trying to reconnect to Logstash # after a network error. After waiting backoff.init seconds, the Beat # tries to reconnect. If the attempt fails, the backoff timer is increased # exponentially up to backoff.max. After a successful connection, the backoff # timer is reset. The default is 1s. #backoff.init: 1s # The maximum number of seconds to wait before attempting to connect to # Logstash after a network error. The default is 60s. #backoff.max: 60s # Optional index name. The default index name is set to filebeat # in all lowercase. #index: 'filebeat' # SOCKS5 proxy server URL #proxy_url: socks5://user:password@socks5-server:2233 # Resolve names locally when using a proxy server. Defaults to false. #proxy_use_local_resolver: false # Use SSL settings for HTTPS. #ssl.enabled: true # Controls the verification of certificates. Valid values are: # * full, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. 
# * strict, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. If the Subject Alternative # Name is empty, it returns an error. # * certificate, which verifies that the provided certificate is signed by a # trusted authority (CA), but does not perform any hostname verification. # * none, which performs no verification of the server's certificate. This # mode disables many of the security benefits of SSL/TLS and should only be used # after very careful consideration. It is primarily intended as a temporary # diagnostic mechanism when attempting to resolve TLS errors; its use in # production environments is strongly discouraged. # The default value is full. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.1 # up to 1.3 are enabled. #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client certificate key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never # Configure a pin that can be used to do extra validation of the verified certificate chain, # this allow you to ensure that a specific certificate is used to validate the chain of trust. # # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" # The number of times to retry publishing an event after a publishing failure. 
# After the specified number of retries, the events are typically dropped. # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting # and retry until all events are published. Set max_retries to a value less # than 0 to retry until all events are published. The default is 3. #max_retries: 3 # The maximum number of events to bulk in a single Logstash request. The # default is 2048. #bulk_max_size: 2048 # The number of seconds to wait for responses from the Logstash server before # timing out. The default is 30s. #timeout: 30s # -------------------------------- Kafka Output -------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. #enabled: true # The list of Kafka broker addresses from which to fetch the cluster metadata. # The cluster metadata contain the actual Kafka brokers events are published # to. #hosts: ["localhost:9092"] # The Kafka topic used for produced events. The setting can be a format string # using any event field. To set the topic from document type use `%{[type]}`. #topic: beats # The Kafka event key setting. Use format string to create a unique event key. # By default no event key will be generated. #key: '' # The Kafka event partitioning strategy. Default hashing strategy is `hash` # using the `output.kafka.key` setting or randomly distributes events if # `output.kafka.key` is not configured. #partition.hash: # If enabled, events will only be published to partitions with reachable # leaders. Default is false. #reachable_only: false # Configure alternative event field names used to compute the hash value. # If empty `output.kafka.key` setting will be used. # Default value is empty list. #hash: [] # Authentication details. Password is required if username is set. #username: '' #password: '' # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. # Defaults to PLAIN when `username` and `password` are configured. 
#sasl.mechanism: '' # Kafka version Filebeat is assumed to run against. Defaults to the "1.0.0". #version: '1.0.0' # Configure JSON encoding #codec.json: # Pretty-print JSON event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # Metadata update configuration. Metadata contains leader information # used to decide which broker to use when publishing. #metadata: # Max metadata request retry attempts when cluster is in middle of leader # election. Defaults to 3 retries. #retry.max: 3 # Wait time between retries during leader elections. Default is 250ms. #retry.backoff: 250ms # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m # Strategy for fetching the topics metadata from the broker. Default is false. #full: false # The number of concurrent load-balanced Kafka output workers. #worker: 1 # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 # The number of seconds to wait before trying to republish to Kafka # after a network error. After waiting backoff.init seconds, the Beat # tries to republish. If the attempt fails, the backoff timer is increased # exponentially up to backoff.max. After a successful publish, the backoff # timer is reset. The default is 1s. #backoff.init: 1s # The maximum number of seconds to wait before attempting to republish to # Kafka after a network error. The default is 60s. #backoff.max: 60s # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 # Duration to wait before sending bulk Kafka request. 0 is no delay. The default # is 0. 
#bulk_flush_frequency: 0s # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. #timeout: 30s # The maximum duration a broker will wait for number of required ACKs. The # default is 10s. #broker_timeout: 10s # The number of messages buffered for each Kafka broker. The default is 256. #channel_buffer_size: 256 # The keep-alive period for an active network connection. If 0s, keep-alives # are disabled. The default is 0 seconds. #keep_alive: 0 # Sets the output compression codec. Must be one of none, snappy and gzip. The # default is gzip. #compression: gzip # Set the compression level. Currently only gzip provides a compression level # between 0 and 9. The default value is chosen by the compression algorithm. #compression_level: 4 # The maximum permitted size of JSON-encoded messages. Bigger messages will be # dropped. The default value is 1000000 (bytes). This value should be equal to # or less than the broker's message.max.bytes. #max_message_bytes: 1000000 # The ACK reliability level required from broker. 0=no response, 1=wait for # local commit, -1=wait for all replicas to commit. The default is 1. Note: # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently # on error. #required_acks: 1 # The configurable ClientID used for logging, debugging, and auditing # purposes. The default is "beats". #client_id: beats # Use SSL settings for HTTPS. #ssl.enabled: true # Controls the verification of certificates. Valid values are: # * full, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. # * strict, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. 
If the Subject Alternative # Name is empty, it returns an error. # * certificate, which verifies that the provided certificate is signed by a # trusted authority (CA), but does not perform any hostname verification. # * none, which performs no verification of the server's certificate. This # mode disables many of the security benefits of SSL/TLS and should only be used # after very careful consideration. It is primarily intended as a temporary # diagnostic mechanism when attempting to resolve TLS errors; its use in # production environments is strongly discouraged. # The default value is full. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.1 # up to 1.3 are enabled. #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client certificate key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never # Configure a pin that can be used to do extra validation of the verified certificate chain, # this allow you to ensure that a specific certificate is used to validate the chain of trust. # # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. #kerberos.enabled: true # Authentication type to use with Kerberos. Available options: keytab, password. #kerberos.auth_type: password # Path to the keytab file. 
It is used when auth_type is set to keytab. #kerberos.keytab: /etc/security/keytabs/kafka.keytab # Path to the Kerberos configuration. #kerberos.config_path: /etc/krb5.conf # The service name. Service principal name is constructed from # service_name/hostname@realm. #kerberos.service_name: kafka # Name of the Kerberos user. #kerberos.username: elastic # Password of the Kerberos user. It is used when auth_type is set to password. #kerberos.password: changeme # Kerberos realm. #kerberos.realm: ELASTIC # Enables Kerberos FAST authentication. This may # conflict with certain Active Directory configurations. #kerberos.enable_krb5_fast: false # -------------------------------- Redis Output -------------------------------- #output.redis: # Boolean flag to enable or disable the output module. #enabled: true # Configure JSON encoding #codec.json: # Pretty print json event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # The list of Redis servers to connect to. If load-balancing is enabled, the # events are distributed to the servers in the list. If one server becomes # unreachable, the events are distributed to the reachable servers only. # The hosts setting supports redis and rediss urls with custom password like # redis://:password@localhost:6379. #hosts: ["localhost:6379"] # The name of the Redis list or channel the events are published to. The # default is filebeat. #key: filebeat # The password to authenticate to Redis with. The default is no authentication. #password: # The Redis database number where the events are published. The default is 0. #db: 0 # The Redis data type to use for publishing events. If the data type is list, # the Redis RPUSH command is used. If the data type is channel, the Redis # PUBLISH command is used. The default value is list. #datatype: list # The number of workers to use for each host configured to publish events to # Redis. Use this setting along with the loadbalance option. 
For example, if # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each # host). #worker: 1 # If set to true and multiple hosts or workers are configured, the output # plugin load balances published events onto all Redis hosts. If set to false, # the output plugin sends all events to only one host (determined at random) # and will switch to another host if the currently selected one becomes # unreachable. The default value is true. #loadbalance: true # The Redis connection timeout in seconds. The default is 5 seconds. #timeout: 5s # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, the events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 # The number of seconds to wait before trying to reconnect to Redis # after a network error. After waiting backoff.init seconds, the Beat # tries to reconnect. If the attempt fails, the backoff timer is increased # exponentially up to backoff.max. After a successful connection, the backoff # timer is reset. The default is 1s. #backoff.init: 1s # The maximum number of seconds to wait before attempting to connect to # Redis after a network error. The default is 60s. #backoff.max: 60s # The maximum number of events to bulk in a single Redis request or pipeline. # The default is 2048. #bulk_max_size: 2048 # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The # value must be a URL with a scheme of socks5://. #proxy_url: # This option determines whether Redis hostnames are resolved locally when # using a proxy. The default value is false, which means that name resolution # occurs on the proxy server. #proxy_use_local_resolver: false # Use SSL settings for HTTPS. #ssl.enabled: true # Controls the verification of certificates. 
Valid values are: # * full, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. # * strict, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. If the Subject Alternative # Name is empty, it returns an error. # * certificate, which verifies that the provided certificate is signed by a # trusted authority (CA), but does not perform any hostname verification. # * none, which performs no verification of the server's certificate. This # mode disables many of the security benefits of SSL/TLS and should only be used # after very careful consideration. It is primarily intended as a temporary # diagnostic mechanism when attempting to resolve TLS errors; its use in # production environments is strongly discouraged. # The default value is full. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.1 # up to 1.3 are enabled. #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client certificate key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. 
#ssl.renegotiation: never # Configure a pin that can be used to do extra validation of the verified certificate chain, # this allow you to ensure that a specific certificate is used to validate the chain of trust. # # The pin is a base64 encoded string of the SHA-256 fingerprint. #ssl.ca_sha256: "" # -------------------------------- File Output --------------------------------- #output.file: # Boolean flag to enable or disable the output module. #enabled: true # Configure JSON encoding #codec.json: # Pretty-print JSON event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # Path to the directory where to save the generated files. The option is # mandatory. #path: "/tmp/filebeat" # Name of the generated files. The default is `filebeat` and it generates # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. #filename: filebeat # Maximum size in kilobytes of each file. When this size is reached, and on # every Filebeat restart, the files are rotated. The default value is 10240 # kB. #rotate_every_kb: 10000 # Maximum number of files under path. When this number of files is reached, # the oldest file is deleted and the rest are shifted from last to first. The # default is 7 files. #number_of_files: 7 # Permissions to use for file creation. The default is 0600. #permissions: 0600 # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. #enabled: true # Configure JSON encoding #codec.json: # Pretty-print JSON event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # =================================== Paths ==================================== # The home path for the Filebeat installation. This is the default base path # for all other path settings and for miscellaneous files that come with the # distribution (for example, the sample dashboards). 
# If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: # The configuration path for the Filebeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} # The data path for the Filebeat installation. This is the default base path # for all the files in which Filebeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data # The logs path for a Filebeat installation. This is the default location for # the Beat's log files. If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. #path.logs: ${path.home}/logs # ================================== Keystore ================================== # Location of the Keystore containing the keys and their sensitive values. #keystore.path: "${path.config}/beats.keystore" # ================================= Dashboards ================================= # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the # options here, or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana # The URL from where to download the dashboards archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: # The file archive (zip file) from where to read the dashboards. 
It is used instead # of the directory when it has a value. #setup.dashboards.file: # In case the archive contains the dashboards from multiple Beats, this lets you # select which one to load. You can load all the dashboards in the archive by # setting this to the empty string. #setup.dashboards.beat: filebeat # The name of the Kibana index to use for setting the configuration. Default is ".kibana" #setup.dashboards.kibana_index: .kibana # The Elasticsearch index name. This overwrites the index name defined in the # dashboards and index pattern. Example: testbeat-* #setup.dashboards.index: # Always use the Kibana API for loading the dashboards instead of autodetecting # how to install the dashboards by first querying Elasticsearch. #setup.dashboards.always_kibana: false # If true and Kibana is not reachable at the time when dashboards are loaded, # it will retry to reconnect to Kibana instead of exiting with an error. #setup.dashboards.retry.enabled: false # Duration interval between Kibana connection retries. #setup.dashboards.retry.interval: 1s # Maximum number of retries before exiting with an error, 0 for unlimited retrying. #setup.dashboards.retry.maximum: 0 # ================================== Template ================================== # A template is used to set the mapping in Elasticsearch # By default template loading is enabled and the template is loaded. # These settings can be adjusted to load your own template or overwrite existing ones. # Set to false to disable template loading. #setup.template.enabled: true # Select the kind of index template. From Elasticsearch 7.8, it is possible to # use component templates. Available options: legacy, component, index. # By default filebeat uses the legacy index templates. #setup.template.type: legacy # Template name. By default the template name is "filebeat-%{[agent.version]}" # The template name and pattern has to be set in case the Elasticsearch index pattern is modified. 
#setup.template.name: "filebeat-%{[agent.version]}"

# Template pattern. By default the template pattern is "filebeat-%{[agent.version]}-*" to apply to the default index settings.
# The first part is the version of the beat and then -* is used to match all daily indices.
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
#setup.template.pattern: "filebeat-%{[agent.version]}-*"

# Path to fields.yml file to generate the template
#setup.template.fields: "${path.config}/fields.yml"

# A list of fields to be added to the template and Kibana index pattern. Also
# specify setup.template.overwrite: true to overwrite the existing template.
#setup.template.append_fields:
#- name: field_name
#  type: field_type

# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false

# Path to the JSON template file
#setup.template.json.path: "${path.config}/template.json"

# Name under which the template is stored in Elasticsearch
#setup.template.json.name: ""

# Overwrite existing template
# Do not enable this option for more than one instance of filebeat as it might
# overload your Elasticsearch with too many update requests.
#setup.template.overwrite: false

# Elasticsearch template settings
setup.template.settings:

  # A dictionary of settings to place into the settings.index dictionary
  # of the Elasticsearch template. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
  #index:
  #  number_of_shards: 1
  #  codec: best_compression

  # A dictionary of settings for the _source field. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
  #_source:
  #  enabled: false

# ====================== Index Lifecycle Management (ILM) ======================

# Configure index lifecycle management (ILM). These settings create a write
# alias and add additional settings to the index template.
# When ILM is enabled,
# output.elasticsearch.index is ignored, and the write alias is used to set the
# index name.

# Enable ILM support. Valid values are true, false, and auto. When set to auto
# (the default), the Beat uses index lifecycle management when it connects to a
# cluster that supports ILM; otherwise, it creates daily indices.
#setup.ilm.enabled: auto

# Set the prefix used in the index lifecycle write alias name. The default alias
# name is 'filebeat-%{[agent.version]}'.
#setup.ilm.rollover_alias: 'filebeat'

# Set the rollover index pattern. The default is "%{now/d}-000001".
#setup.ilm.pattern: "%{now/d}-000001"

# Set the lifecycle policy name. The default policy name is
# 'filebeat'.
#setup.ilm.policy_name: "mypolicy"

# The path to a JSON file that contains a lifecycle policy configuration. Used
# to load your own lifecycle policy.
#setup.ilm.policy_file:

# Disable the check for an existing lifecycle policy. The default is true. If
# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
# can be installed.
#setup.ilm.check_exists: true

# Overwrite the lifecycle policy at startup. The default is false.
#setup.ilm.overwrite: false

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Optional HTTP path
  #path: ""

  # Optional Kibana space ID.
  #space.id: ""

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Controls the verification of certificates.
Valid values are: # * full, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. # * strict, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. If the Subject Alternative # Name is empty, it returns an error. # * certificate, which verifies that the provided certificate is signed by a # trusted authority (CA), but does not perform any hostname verification. # * none, which performs no verification of the server's certificate. This # mode disables many of the security benefits of SSL/TLS and should only be used # after very careful consideration. It is primarily intended as a temporary # diagnostic mechanism when attempting to resolve TLS errors; its use in # production environments is strongly discouraged. # The default value is full. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.1 # up to 1.3 are enabled. #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client certificate key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the certificate key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. 
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified certificate chain,
  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""

# ================================== Logging ===================================

# There are four options for the log output: file, stderr, syslog, eventlog
# The file output is the default.

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publisher", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]

# Send all logging output to stderr. The default is false.
#logging.to_stderr: false

# Send all logging output to syslog. The default is false.
#logging.to_syslog: false

# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false

# If enabled, Filebeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# A list of metrics namespaces to report in the logs. Defaults to [stats].
# `stats` contains general Beat metrics. `dataset` may be present in some
# Beats and contains module or input metrics.
#logging.metrics.namespaces: [stats]

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written.
  # The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/filebeat

  # The name of the files where the logs are written to.
  #name: filebeat

  # Configure log file size limit. If limit is reached, log file will be
  # automatically rotated
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7

  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600

  # Enable log file rotation on time intervals in addition to size-based rotation.
  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
  # reported by the local system clock. All other intervals are calculated from the
  # Unix epoch. Defaults to disabled.
  #interval: 0

  # Rotate existing logs on startup rather than appending to the existing
  # file. Defaults to true.
  # rotateonstartup: true

  # Rotated files are either suffixed with a number e.g. filebeat.1 when
  # renamed during rotation. Or when set to date, the date is added to
  # the end of the file. On rotation a new file is created, older files are untouched.
  #suffix: count

# Set to true to log messages in JSON format.
#logging.json: false

# Set to true, to log messages with minimal required Elastic Common Schema (ECS)
# information. Recommended to use in combination with `logging.json=true`
# Defaults to false.
#logging.ecs: false

# ============================= X-Pack Monitoring ==============================

# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false # Sets the UUID of the Elasticsearch cluster under which monitoring data for this # Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch # is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the # Elasticsearch output are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such # that it is pointing to your Elasticsearch monitoring cluster, you can simply # uncomment the following line. #monitoring.elasticsearch: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) # In case you specify and additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] # Set gzip compression level. #compression_level: 0 # Protocol - either `http` (default) or `https`. #protocol: "https" # Authentication credentials - either API key or username/password. #api_key: "id:api_key" #username: "beats_system" #password: "changeme" # Dictionary of HTTP parameters to pass within the URL with index operations. #parameters: #param1: value1 #param2: value2 # Custom HTTP headers to add to each request #headers: # X-My-Header: Contents of the header # Proxy server url #proxy_url: http://proxy:3128 # The number of times a particular Elasticsearch index operation is attempted. If # the indexing operation doesn't succeed after this many retries, the events are # dropped. The default is 3. #max_retries: 3 # The maximum number of events to bulk in a single Elasticsearch bulk API index request. # The default is 50. 
#bulk_max_size: 50 # The number of seconds to wait before trying to reconnect to Elasticsearch # after a network error. After waiting backoff.init seconds, the Beat # tries to reconnect. If the attempt fails, the backoff timer is increased # exponentially up to backoff.max. After a successful connection, the backoff # timer is reset. The default is 1s. #backoff.init: 1s # The maximum number of seconds to wait before attempting to connect to # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s # Configure HTTP request timeout before failing an request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. #ssl.enabled: true # Controls the verification of certificates. Valid values are: # * full, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. # * strict, which verifies that the provided certificate is signed by a trusted # authority (CA) and also verifies that the server's hostname (or IP address) # matches the names identified within the certificate. If the Subject Alternative # Name is empty, it returns an error. # * certificate, which verifies that the provided certificate is signed by a # trusted authority (CA), but does not perform any hostname verification. # * none, which performs no verification of the server's certificate. This # mode disables many of the security benefits of SSL/TLS and should only be used # after very careful consideration. It is primarily intended as a temporary # diagnostic mechanism when attempting to resolve TLS errors; its use in # production environments is strongly discouraged. # The default value is full. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.1 # up to 1.3 are enabled. 
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified certificate chain,
  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""

  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
  #kerberos.enabled: true

  # Authentication type to use with Kerberos. Available options: keytab, password.
  #kerberos.auth_type: password

  # Path to the keytab file. It is used when auth_type is set to keytab.
  #kerberos.keytab: /etc/elastic.keytab

  # Path to the Kerberos configuration.
  #kerberos.config_path: /etc/krb5.conf

  # Name of the Kerberos user.
  #kerberos.username: elastic

  # Password of the Kerberos user. It is used when auth_type is set to password.
  #kerberos.password: changeme

  # Kerberos realm.
  #kerberos.realm: ELASTIC

  #metrics.period: 10s
  #state.period: 1m

# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`
# setting. You can find the value for this setting in the Elastic Cloud web UI.
#monitoring.cloud.id:

# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`
# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.
#monitoring.cloud.auth:

# =============================== HTTP Endpoint ================================

# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
#http.enabled: false

# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066

# Define which user should be owning the named pipe.
#http.named_pipe.user:

# Define the permissions that should be applied to the named pipe, use the Security
# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
# `http.user`.
#http.named_pipe.security_descriptor:

# ============================== Process Security ==============================

# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true

# ============================== Instrumentation ===============================

# Instrumentation support for the filebeat.
#instrumentation:
    # Set to true to enable instrumentation of filebeat.
    #enabled: false

    # Environment in which filebeat is running (e.g. staging, production, etc.)
    #environment: ""

    # APM Server hosts to report instrumentation results to.
    #hosts:
    #  - http://localhost:8200

    # API Key for the APM Server(s).
    # If api_key is set then secret_token will be ignored.
    #api_key:

    # Secret token for the APM Server(s).
    #secret_token:

    # Enable profiling of the server, recording profile samples as events.
    #
    # This feature is experimental.
    #profiling:
        #cpu:
            # Set to true to enable CPU profiling.
#enabled: false #interval: 60s #duration: 10s #heap: # Set to true to enable heap profiling. #enabled: false #interval: 60s # ================================= Migration ================================== # This allows to enable 6.7 migration aliases #migration.6_to_7.enabled: false