#
# This is the Network Flight Recorder (NFR) configuration file. Here you define
# the location of the AlphaSOC Analytics Engine ("engine" section), the source of
# network traffic for NFR to score (by sniffing a network interface or monitoring
# log files on disk, as set in the "inputs" section), and where to send the alerts
# generated by the Analytics Engine (set in the "outputs" section).
#
# Please contact support@alphasoc.com if you have any questions
#

################################################################################
# The engine section describes the location of the Analytics Engine, your
# AlphaSOC API key, and the polling interval to retrieve alerts.
################################################################################

engine:
  # Location of the AlphaSOC Analytics Engine
  # This is a URI and defaults to the AlphaSOC cloud. Please contact
  # support@alphasoc.com to discuss on-premise operation and VM setup.
  # Default: https://api.alphasoc.net
  host: https://api.alphasoc.net

  # Your AlphaSOC API key (required to use the service)
  # Use "nfr account register" to generate one
  api_key: test-api-key

  # Use the following section to enable or disable analysis modules
  analyze:
    # Enable (true) or disable (false) DNS event processing
    # Default: true
    dns: true
    # Enable (true) or disable (false) IP event processing
    # Default: true
    ip: true

  alerts:
    # Interval for polling the Analytics Engine for new alerts
    # Default: 5m
    poll_interval: 5m

################################################################################
# The inputs section describes the sources of network traffic for NFR to score
# (e.g. a network interface to sniff, or log files to read)
################################################################################

inputs:
  # Capture live traffic via network sniffing
  sniffer:
    # Define whether NFR should sniff network data or not
    # Default: true
    enabled: false
    # Interface to listen for network traffic on (e.g. eth0)
    # If none is defined, the first non-loopback interface will be used by NFR
    # Default: (none)
    interface:

  # Define log files containing network events to monitor
  # Files are only monitored if NFR is run with the "monitor" command. You
  # can monitor multiple files here (e.g. Bro IDS dns.log and conn.log files)
  # Default: []
  monitor:
    # Format of the file (possible values are: bro, suricata, msdns)
    # Default: (none)
    - format:
      # Type of events in the file (possible values are: dns, ip)
      # Default: (none)
      type:
      # File on disk which NFR should monitor
      # Default: (none)
      file:
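    # For example, to monitor Zeek (Bro) dns.log and conn.log files
    # (illustrative paths; adjust them to your installation):
    # - format: bro
    #   type: dns
    #   file: /opt/zeek/logs/current/dns.log
    # - format: bro
    #   type: ip
    #   file: /opt/zeek/logs/current/conn.log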

  # Time format used for parsing MSDNS log files.
  # Format layout documented at https://golang.org/pkg/time/#Parse
  #msdns_time_format: "1/02/2006 3:04:05 PM"

  # Use inotify for detecting changes in files. If false, regular file polling
  # will be used. Default: false for Windows, true otherwise.
  #use_inotify: true

  elastic:
    # Set to true to retrieve telemetry from Elasticsearch
    # Default: false
    enabled: false

    # Either cloud_id or hosts is required. You can find your
    # Cloud ID at https://cloud.elastic.co/deployments/
    cloud_id:
    # hosts:
    #  - elastic.example.com:9200

    # Use an API key (recommended) or username/password to authenticate to the
    # Elasticsearch instance. Find out more about API keys here:
    # https://cloud.elastic.co/deployment-features/keys
    api_key:
    # username:
    # password:

    searches:
      # Define your searches, one per event type (dns, ip, http, tls). At least
      # one search is required.
      - event_type: dns

        # Number of documents to download per search. The default (10000) is recommended.
        # Decrease the batch size if search queries time out.
        batch_size: 10000

        # Indices to search. Wildcards are allowed.
        indices:
          # - filebeat-*

        # Set index_schema if your documents adhere to well-known formats.
        # Currently supported schemas: ecs.
        # If your document structure is non-standard, leave index_schema empty and
        # use search_term and field_names to customize the search query and to let
        # NFR know which document fields it should look for.
        index_schema: ecs

        # Even if index_schema is set, you can override the default search term with
        # a custom one. The search term should be valid JSON compatible with the Search
        # API, as it will be embedded in the 'filter' clause of the Query DSL.
        # For details, see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
        search_term:
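        # Illustrative sketch only (the "event.category" field and the JSON-string
        # form are assumptions; the value must be valid Query DSL JSON for the
        # filter clause):
        # search_term: '{"term": {"event.category": "network"}}'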

        # The following fields must be present in a document to be retrieved by NFR.
        must_have_fields:
          # - @timestamp

        # A timestamp format (e.g. strict_date_time_no_millis) can be specified, and will be
        # added to search queries when appropriate. It's advised to leave this unset unless
        # you have reason to set a specific format.
        # timestamp_format:

        # Field names should contain the document field paths that hold the required
        # telemetry data. They can be left empty if index_schema is set. As with the
        # search term, you can override some or all of the mappings by setting the
        # fields below.
        field_names:
          # Event timestamp (required).
          # timestamp:
          # Timestamp when an event arrived in the central data store (required).
          # event_ingested:
          # IP address of the source (IPv4 or IPv6, required).
          # src_ip:
          # Port of the source
          # src_port:
          # FQDN being queried (required for dns event type).
          # query:
          # DNS query type.
          # qtype:
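        # An illustrative field_names mapping for ECS-style documents (example
        # field paths only; when index_schema is set these can normally be left empty):
        # field_names:
        #   timestamp: "@timestamp"
        #   event_ingested: event.ingested
        #   src_ip: source.ip
        #   src_port: source.port
        #   query: dns.question.name
        #   qtype: dns.question.type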
      - event_type: ip
        indices:
          # - filebeat-*
        index_schema: ecs
        # A timestamp format (e.g. strict_date_time_no_millis) can be specified, and will be
        # added to search queries when appropriate. It's advised to leave this unset unless
        # you have reason to set a specific format.
        # timestamp_format:
        field_names:
          # Event timestamp (required).
          # timestamp:
          # Timestamp when an event arrived in the central data store (required).
          # event_ingested:
          # IP address of the source (IPv4 or IPv6, required).
          # src_ip:
          # Port of the source
          # src_port:
          # IP address of the destination (IPv4 or IPv6, required).
          # dest_ip:
          # Port of the destination.
          # dest_port:
          # Protocol
          # proto:
          # Bytes ingested by the source.
          # bytes_in:
          # Bytes transmitted by the source.
          # bytes_out:
      - event_type: http
        indices:
          # - filebeat-*
        index_schema: ecs
        # A timestamp format (e.g. strict_date_time_no_millis) can be specified, and will be
        # added to search queries when appropriate. It's advised to leave this unset unless
        # you have reason to set a specific format.
        # timestamp_format:
        field_names:
          # Event timestamp (required).
          # timestamp:
          # Timestamp when an event arrived in the central data store (required).
          # event_ingested:
          # IP address of the source (IPv4 or IPv6, required).
          # src_ip:
          # Port of the source
          # src_port:
          # URL (required).
          # url:
          # HTTP Method.
          # method:
          # HTTP Status.
          # status:
          # HTTP User Agent.
          # user_agent:
      - event_type: tls
        indices:
          # - filebeat-*
        index_schema: ecs
        # A timestamp format (e.g. strict_date_time_no_millis) can be specified, and will be
        # added to search queries when appropriate. It's advised to leave this unset unless
        # you have reason to set a specific format.
        # timestamp_format:
        field_names:
          # Event timestamp (required).
          # timestamp:
          # Timestamp when an event arrived in the central data store (required).
          # event_ingested:
          # IP address of the source (IPv4 or IPv6, required).
          # src_ip:
          # Port of the source
          # src_port:
          # IP address of the destination (IPv4 or IPv6, required).
          # dest_ip:
          # Port of the destination
          # dest_port:
          # Certificate fingerprint: the SHA1 digest of the DER-encoded certificate offered by the server
          # cert_hash:
          # Subject of the issuer of the x.509 certificate presented by the server
          # issuer:
          # Subject of the x.509 certificate presented by the server
          # subject:
          # Timestamp indicating when server certificate is first considered valid
          # valid_from:
          # Timestamp indicating when server certificate is no longer considered valid
          # valid_to:
          # A hash that identifies clients based on how they perform an SSL/TLS handshake
          # ja3:
          # A hash that identifies servers based on how they perform an SSL/TLS handshake
          # ja3s:

################################################################################
# The outputs section describes where NFR should send the alerts generated by
# the Analytics Engine (e.g. Graylog, a local file, or terminal)
################################################################################

outputs:
  # Define whether NFR should gather alerts to send elsewhere. You can use this
  # toggle to disable escalation of alerts and run NFR in input-only mode, in
  # which it only submits network events to the Analytics Engine for scoring.
  # This allows you to run multiple sensors and elect a single NFR instance to
  # collect alerts and send them elsewhere.
  # Default: true
  enabled: true

  # Syslog server where AlphaSOC alerts will be sent in JSON or CEF format.
  # NFR will use TCP port 514 and send JSON messages via syslog by default.
  # Use the fields below to define the syslog server IP address and port.
  syslog:
    # IP address of the syslog server
    # Default: (none)
    ip:
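    # For example (illustrative address):
    # ip: 192.0.2.10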
    # Port for the syslog TCP input
    # Default: 514
    port: 514
    # Connection protocol
    # Default: tcp
    proto: tcp
    # Log format (can be json or cef)
    # Default: json
    format: json

  # Graylog server URI where AlphaSOC alerts will be sent in GELF format.
  # The AlphaSOC Network Behavior Analytics for Graylog content pack establishes
  # an input on TCP port 12201, which can be used here for plug-and-play operation.
  graylog:
    # URI to the server (for example tcp://127.0.0.1:12201)
    # Default: (none)
    uri:
    # Message level
    # Default: 1
    level: 1

  # Location to which alerts should be written. This can be a file, or a special
  # output (stderr or stdout) to print events to the terminal.
  # Default: stderr
  file: stderr

  # File output format (can be json or cef)
  # Default: json
  format: json

################################################################################
# Monitoring scope file location
################################################################################

scope:
  # Monitoring scope file to load
  # Default: (none)
  file:

################################################################################
# NFR logging configuration
################################################################################

log:
  # File to which NFR should log
  # To print logs to the console, use one of two special outputs: stderr or stdout
  # Default: stdout
  file: stdout

  # Logging level. Possible values are: debug, info, warn, error
  # Default: info
  level: info

################################################################################
# Internal NFR data location
################################################################################

data:
  # Define the file for internal data and caching
  # Default:
  #  - linux: /run/nfr.data
  #  - windows: %AppData%/nfr.data
  file: /run/nfr.data
  # If you use the elastic input, define the directory for
  # internal bookkeeping and caching
  # Default:
  # - linux: /run/nfr
  # - windows: %AppData%/nfr
  dir: /run/nfr/data

################################################################################
# DNS data processing and queueing configuration
################################################################################

dns_events:
  # NFR buffer size for the DNS event queue
  # Default: 65535
  buffer_size: 65535

  # Interval for flushing data to Analytics Engine for scoring
  # Default: 30s
  flush_interval: 30s

  # If NFR is unable to send DNS events to the Analytics Engine, it can
  # write the events to disk (in PCAP format) and attempt to send them again
  failed:
    # File in which to store unprocessed DNS events on disk
    # Default: (none)
    file:
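    # For example (illustrative path):
    # file: /var/lib/nfr/dns-events-failed.pcap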

################################################################################
# IP data processing and queueing configuration
################################################################################

ip_events:
  # NFR buffer size for the IP event queue
  # Default: 65535
  buffer_size: 65535

  # Interval for flushing data to Analytics Engine for scoring
  # Default: 30s
  flush_interval: 30s

  # If NFR is unable to send IP events to the Analytics Engine, it can
  # write the events to disk (in PCAP format) and attempt to send them again
  failed:
    # File in which to store unprocessed IP events on disk
    # Default: (none)
    file:

################################################################################
# HTTP data processing and queueing configuration
################################################################################

http_events:
  # NFR buffer size for the HTTP event queue
  # Default: 65535
  buffer_size: 65535

  # Interval for flushing data to Analytics Engine for scoring
  # Default: 30s
  flush_interval: 30s