apiVersion: v1
kind: ConfigMap
metadata:
  name: agent-node-datastreams
  namespace: kube-system
  labels:
    k8s-app: elastic-agent-standalone
data:
  agent.yml: |-
    outputs:
      default:
        type: elasticsearch
        hosts:
          - >-
            ${ES_HOST}
        username: ${ES_USERNAME}
        password: ${ES_PASSWORD}
    agent:
      monitoring:
        enabled: true
        use_output: default
        logs: true
        metrics: true
    providers.kubernetes:
      node: ${NODE_NAME}
      scope: node
    inputs:
      - name: kubernetes-cluster-metrics
        condition: ${kubernetes_leaderelection.leader} == true
        type: kubernetes/metrics
        use_output: default
        meta:
          package:
            name: kubernetes
            version: 1.9.0
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: kubernetes.apiserver
              type: metrics
            metricsets:
              - apiserver
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${env.KUBERNETES_SERVICE_HOST}:${env.KUBERNETES_SERVICE_PORT}'
            period: 30s
            ssl.certificate_authorities:
              - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          - data_stream:
              dataset: kubernetes.event
              type: metrics
            metricsets:
              - event
            period: 10s
            add_metadata: true
          - data_stream:
              dataset: kubernetes.state_container
              type: metrics
            metricsets:
              - state_container
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
            # If `https` is used to access `kube-state-metrics`, add the following
            # settings to all `kubernetes.state_*` datasets:
            # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            # ssl.certificate_authorities:
            #   - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
          - data_stream:
              dataset: kubernetes.state_cronjob
              type: metrics
            metricsets:
              - state_cronjob
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_daemonset
              type: metrics
            metricsets:
              - state_daemonset
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_deployment
              type: metrics
            metricsets:
              - state_deployment
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_job
              type: metrics
            metricsets:
              - state_job
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_node
              type: metrics
            metricsets:
              - state_node
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_persistentvolume
              type: metrics
            metricsets:
              - state_persistentvolume
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_persistentvolumeclaim
              type: metrics
            metricsets:
              - state_persistentvolumeclaim
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_pod
              type: metrics
            metricsets:
              - state_pod
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_replicaset
              type: metrics
            metricsets:
              - state_replicaset
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_resourcequota
              type: metrics
            metricsets:
              - state_resourcequota
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_service
              type: metrics
            metricsets:
              - state_service
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_statefulset
              type: metrics
            metricsets:
              - state_statefulset
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
          - data_stream:
              dataset: kubernetes.state_storageclass
              type: metrics
            metricsets:
              - state_storageclass
            add_metadata: true
            hosts:
              - 'kube-state-metrics:8080'
            period: 10s
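      # NOTE: the `kubernetes.state_*` streams above assume that kube-state-metrics is
      # deployed in the cluster and reachable at `kube-state-metrics:8080`; adjust the
      # `hosts` entries if it is exposed under a different service name or port.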
      - name: system-logs
        type: logfile
        use_output: default
        meta:
          package:
            name: system
            version: 0.10.7
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: system.auth
              type: logs
            paths:
              - /var/log/auth.log*
              - /var/log/secure*
            exclude_files:
              - .gz$
            multiline:
              pattern: ^\s
              match: after
            processors:
              - add_fields:
                  target: ''
                  fields:
                    ecs.version: 1.12.0
          - data_stream:
              dataset: system.syslog
              type: logs
            paths:
              - /var/log/messages*
              - /var/log/syslog*
            exclude_files:
              - .gz$
            multiline:
              pattern: ^\s
              match: after
            processors:
              - add_fields:
                  target: ''
                  fields:
                    ecs.version: 1.12.0
      - name: container-log
        id: container-log-${kubernetes.pod.name}-${kubernetes.container.id}
        type: filestream
        use_output: default
        meta:
          package:
            name: kubernetes
            version: 1.9.0
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: kubernetes.container_logs
              type: logs
            prospector.scanner.symlinks: true
            parsers:
              - container: ~
              # - ndjson:
              #     target: json
              # - multiline:
              #     type: pattern
              #     pattern: '^\['
              #     negate: true
              #     match: after
            paths:
              - /var/log/containers/*${kubernetes.container.id}.log
      - name: audit-log
        id: audit-log
        type: filestream
        use_output: default
        meta:
          package:
            name: kubernetes
            version: 1.9.0
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: kubernetes.audit_logs
              type: logs
            exclude_files:
              - .gz$
            parsers:
              - ndjson:
                  add_error_key: true
                  target: kubernetes_audit
            paths:
              - /var/log/kubernetes/kube-apiserver-audit.log
              # The default path of audit logs on Openshift:
              # - /var/log/kube-apiserver/audit.log
            processors:
              - rename:
                  fields:
                    - from: kubernetes_audit
                      to: kubernetes.audit
              - script:
                  id: dedot_annotations
                  lang: javascript
                  source: |
                    function process(event) {
                      var audit = event.Get("kubernetes.audit");
                      for (var annotation in audit["annotations"]) {
                        var annotation_dedoted = annotation.replace(/\./g,'_')
                        event.Rename("kubernetes.audit.annotations."+annotation, "kubernetes.audit.annotations."+annotation_dedoted)
                      }
                      return event;
                    }
                    function test() {
                      var event = process(new Event({
                        "kubernetes": {
                          "audit": {
                            "annotations": {
                              "authorization.k8s.io/decision": "allow",
                              "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"system:kube-scheduler\" of ClusterRole \"system:kube-scheduler\" to User \"system:kube-scheduler\""
                            }
                          }
                        }
                      }));
                      if (event.Get("kubernetes.audit.annotations.authorization_k8s_io/decision") !== "allow") {
                        throw "expected kubernetes.audit.annotations.authorization_k8s_io/decision === allow";
                      }
                    }
      - name: system-metrics
        type: system/metrics
        use_output: default
        meta:
          package:
            name: system
            version: 0.10.9
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: system.core
              type: metrics
            metricsets:
              - core
            core.metrics:
              - percentages
          - data_stream:
              dataset: system.cpu
              type: metrics
            period: 10s
            cpu.metrics:
              - percentages
              - normalized_percentages
            metricsets:
              - cpu
          - data_stream:
              dataset: system.diskio
              type: metrics
            period: 10s
            diskio.include_devices: null
            metricsets:
              - diskio
          - data_stream:
              dataset: system.filesystem
              type: metrics
            period: 1m
            metricsets:
              - filesystem
            processors:
              - drop_event.when.regexp:
                  system.filesystem.mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)
          - data_stream:
              dataset: system.fsstat
              type: metrics
            period: 1m
            metricsets:
              - fsstat
            processors:
              - drop_event.when.regexp:
                  system.fsstat.mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)
          - data_stream:
              dataset: system.load
              type: metrics
            period: 10s
            metricsets:
              - load
          - data_stream:
              dataset: system.memory
              type: metrics
            period: 10s
            metricsets:
              - memory
          - data_stream:
              dataset: system.network
              type: metrics
            period: 10s
            network.interfaces: null
            metricsets:
              - network
          - data_stream:
              dataset: system.process
              type: metrics
            process.include_top_n.by_memory: 5
            period: 10s
            processes:
              - .*
            process.include_top_n.by_cpu: 5
            process.cgroups.enabled: false
            process.cmdline.cache.enabled: true
            metricsets:
              - process
            process.include_cpu_ticks: false
            system.hostfs: /hostfs
          - data_stream:
              dataset: system.process_summary
              type: metrics
            period: 10s
            metricsets:
              - process_summary
            system.hostfs: /hostfs
          - data_stream:
              dataset: system.socket_summary
              type: metrics
            period: 10s
            metricsets:
              - socket_summary
            system.hostfs: /hostfs
      - name: kubernetes-node-metrics
        type: kubernetes/metrics
        use_output: default
        meta:
          package:
            name: kubernetes
            version: 1.9.0
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: kubernetes.controllermanager
              type: metrics
            metricsets:
              - controllermanager
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${kubernetes.pod.ip}:10257'
            period: 10s
            ssl.verification_mode: none
            condition: ${kubernetes.labels.component} == 'kube-controller-manager'
            # Openshift:
            # condition: ${kubernetes.labels.app} == 'kube-controller-manager'
          - data_stream:
              dataset: kubernetes.scheduler
              type: metrics
            metricsets:
              - scheduler
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${kubernetes.pod.ip}:10259'
            period: 10s
            ssl.verification_mode: none
            condition: ${kubernetes.labels.component} == 'kube-scheduler'
            # Openshift:
            # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler'
          - data_stream:
              dataset: kubernetes.proxy
              type: metrics
            metricsets:
              - proxy
            hosts:
              - 'localhost:10249'
              # Openshift:
              # - 'localhost:29101'
            period: 10s
          - data_stream:
              dataset: kubernetes.container
              type: metrics
            metricsets:
              - container
            add_metadata: true
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${env.NODE_NAME}:10250'
            period: 10s
            ssl.verification_mode: none
            # On Openshift, the ssl configuration must be replaced with:
            # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            # ssl.certificate_authorities:
            #   - /path/to/ca-bundle.crt
          - data_stream:
              dataset: kubernetes.node
              type: metrics
            metricsets:
              - node
            add_metadata: true
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${env.NODE_NAME}:10250'
            period: 10s
            ssl.verification_mode: none
            # On Openshift, the ssl configuration must be replaced with:
            # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            # ssl.certificate_authorities:
            #   - /path/to/ca-bundle.crt
          - data_stream:
              dataset: kubernetes.pod
              type: metrics
            metricsets:
              - pod
            add_metadata: true
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${env.NODE_NAME}:10250'
            period: 10s
            ssl.verification_mode: none
            # On Openshift, the ssl configuration must be replaced with:
            # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            # ssl.certificate_authorities:
            #   - /path/to/ca-bundle.crt
          - data_stream:
              dataset: kubernetes.system
              type: metrics
            metricsets:
              - system
            add_metadata: true
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${env.NODE_NAME}:10250'
            period: 10s
            ssl.verification_mode: none
            # On Openshift, the ssl configuration must be replaced with:
            # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            # ssl.certificate_authorities:
            #   - /path/to/ca-bundle.crt
          - data_stream:
              dataset: kubernetes.volume
              type: metrics
            metricsets:
              - volume
            add_metadata: true
            bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            hosts:
              - 'https://${env.NODE_NAME}:10250'
            period: 10s
            ssl.verification_mode: none
            # On Openshift, the ssl configuration must be replaced with:
            # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
            # ssl.certificate_authorities:
            #   - /path/to/ca-bundle.crt
      # Add extra input blocks here, based on conditions,
      # so as to automatically identify targeted Pods and start monitoring them
      # using a predefined integration. For instance:
      #- name: redis
      #  type: redis/metrics
      #  use_output: default
      #  meta:
      #    package:
      #      name: redis
      #      version: 0.3.6
      #  data_stream:
      #    namespace: default
      #  streams:
      #    - data_stream:
      #        dataset: redis.info
      #        type: metrics
      #      metricsets:
      #        - info
      #      hosts:
      #        - '${kubernetes.pod.ip}:6379'
      #      idle_timeout: 20s
      #      maxconn: 10
      #      network: tcp
      #      period: 10s
      #      condition: ${kubernetes.labels.app} == 'redis'
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: elastic-agent-standalone
  namespace: kube-system
  labels:
    app: elastic-agent-standalone
spec:
  selector:
    matchLabels:
      app: elastic-agent-standalone
  template:
    metadata:
      labels:
        app: elastic-agent-standalone
    spec:
      # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes.
      # Agents running on control-plane nodes collect metrics from the control plane
      # components (scheduler, controller manager) of Kubernetes.
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: elastic-agent-standalone
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: elastic-agent-standalone
          image: docker.elastic.co/beats/elastic-agent:8.3.3
          args: [
            "-c", "/etc/agent.yml",
            "-e",
          ]
          env:
            - name: ES_USERNAME
              value: "elastic"
            - name: ES_PASSWORD
              value: ""
            - name: ES_HOST
              value: ""
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          securityContext:
            runAsUser: 0
          resources:
            limits:
              memory: 700Mi
            requests:
              cpu: 100m
              memory: 400Mi
          volumeMounts:
            - name: datastreams
              mountPath: /etc/agent.yml
              readOnly: true
              subPath: agent.yml
            - name: proc
              mountPath: /hostfs/proc
              readOnly: true
            - name: cgroup
              mountPath: /hostfs/sys/fs/cgroup
              readOnly: true
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
            - name: etc-kubernetes
              mountPath: /hostfs/etc/kubernetes
              readOnly: true
            - name: var-lib
              mountPath: /hostfs/var/lib
              readOnly: true
            - name: passwd
              mountPath: /hostfs/etc/passwd
              readOnly: true
            - name: group
              mountPath: /hostfs/etc/group
              readOnly: true
            - name: etcsysmd
              mountPath: /hostfs/etc/systemd
              readOnly: true
      volumes:
        - name: datastreams
          configMap:
            defaultMode: 0640
            name: agent-node-datastreams
        - name: proc
          hostPath:
            path: /proc
        - name: cgroup
          hostPath:
            path: /sys/fs/cgroup
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        - name: etc-kubernetes
          hostPath:
            path: /etc/kubernetes
        - name: var-lib
          hostPath:
            path: /var/lib
        - name: passwd
          hostPath:
            path: /etc/passwd
        - name: group
          hostPath:
            path: /etc/group
        - name: etcsysmd
          hostPath:
            path: /etc/systemd
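# The objects below grant the elastic-agent-standalone ServiceAccount the RBAC
# permissions the inputs above rely on: cluster-wide read access to workload and
# node resources, leader-election Leases, and the kubeadm-config ConfigMap.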
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: elastic-agent-standalone
subjects:
  - kind: ServiceAccount
    name: elastic-agent-standalone
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: elastic-agent-standalone
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: kube-system
  name: elastic-agent-standalone
subjects:
  - kind: ServiceAccount
    name: elastic-agent-standalone
    namespace: kube-system
roleRef:
  kind: Role
  name: elastic-agent-standalone
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: elastic-agent-standalone-kubeadm-config
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: elastic-agent-standalone
    namespace: kube-system
roleRef:
  kind: Role
  name: elastic-agent-standalone-kubeadm-config
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: elastic-agent-standalone
  labels:
    k8s-app: elastic-agent-standalone
rules:
  - apiGroups: [""]
    resources:
      - nodes
      - namespaces
      - events
      - pods
      - services
      - configmaps
      - serviceaccounts
      - persistentvolumes
      - persistentvolumeclaims
    verbs: ["get", "list", "watch"]
  # Enable this rule only if planning to use the kubernetes_secrets provider
  #- apiGroups: [""]
  #  resources:
  #    - secrets
  #  verbs: ["get"]
  - apiGroups: ["extensions"]
    resources:
      - replicasets
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources:
      - statefulsets
      - deployments
      - replicasets
      - daemonsets
    verbs: ["get", "list", "watch"]
  - apiGroups: ["batch"]
    resources:
      - jobs
      - cronjobs
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - ""
    resources:
      - nodes/stats
    verbs:
      - get
  # Required for the apiserver metricset
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources:
      - clusterrolebindings
      - clusterroles
      - rolebindings
      - roles
    verbs: ["get", "list", "watch"]
  - apiGroups: ["policy"]
    resources:
      - podsecuritypolicies
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: elastic-agent-standalone
  # should be the namespace where elastic-agent is running
  namespace: kube-system
  labels:
    k8s-app: elastic-agent-standalone
rules:
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs: ["get", "create", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: elastic-agent-standalone-kubeadm-config
  namespace: kube-system
  labels:
    k8s-app: elastic-agent-standalone
rules:
  - apiGroups: [""]
    resources:
      - configmaps
    resourceNames:
      - kubeadm-config
    verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elastic-agent-standalone
  namespace: kube-system
  labels:
    k8s-app: elastic-agent-standalone
---
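# Example usage (sketch; the manifest filename below is illustrative, use whatever
# name you saved this file under, and set ES_HOST/ES_PASSWORD in the DaemonSet first):
#   kubectl apply -f elastic-agent-standalone-kubernetes.yaml
#   kubectl -n kube-system get pods -l app=elastic-agent-standalone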