containers.input.conf: |-
  # This configuration file for Fluentd / td-agent is used
  # to watch changes to Docker log files. The kubelet creates symlinks that
  # capture the pod name, namespace, container name & Docker container ID
  # to the docker logs for pods in the /var/log/containers directory on the host.
  # If running this fluentd configuration in a Docker container, the /var/log
  # directory should be mounted in the container.
  #
  # These logs are then submitted to Elasticsearch which assumes the
  # installation of the fluent-plugin-elasticsearch & the
  # fluent-plugin-kubernetes_metadata_filter plugins.
  # See https://github.com/uken/fluent-plugin-elasticsearch &
  # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
  # more information about the plugins.
  #
  # Example
  # =======
  # A line in the Docker log file might look like this JSON:
  #
  # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
  #  "stream":"stderr",
  #  "time":"2014-09-25T21:15:03.499185026Z"}
  #
  # The time_format specification below makes sure we properly
  # parse the time format produced by Docker. This will be
  # submitted to Elasticsearch and should appear like:
  # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
  # ...
  # {
  #   "_index" : "logstash-2014.09.25",
  #   "_type" : "fluentd",
  #   "_id" : "VBrbor2QTuGpsQyTCdfzqA",
  #   "_score" : 1.0,
  #   "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
  #              "stream":"stderr","tag":"docker.container.all",
  #              "@timestamp":"2014-09-25T22:45:50+00:00"}
  # },
  # ...
  #
  # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
  # record & add labels to the log record if properly configured. This enables users
  # to filter & search logs on any metadata.
  # For example a Docker container's logs might be in the directory:
  #
  #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
  #
  # and in the file:
  #
  #   997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
  #
  # where 997599971ee6... is the Docker ID of the running container.
  # The Kubernetes kubelet makes a symbolic link to this file on the host machine
  # in the /var/log/containers directory which includes the pod name and the Kubernetes
  # container name:
  #
  #   synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
  #   ->
  #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
  #
  # The /var/log directory on the host is mapped to the /var/log directory in the container
  # running this instance of Fluentd and we end up collecting the file:
  #
  #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
  #
  # This results in the tag:
  #
  #   var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
  #
  # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
  # which are added to the log message as a kubernetes field object & the Docker container ID
  # is also added under the docker field object.
  # The final tag is:
  #
  #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
  #
  # And the final log record looks like:
  #
  # {
  #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
  #   "stream":"stderr",
  #   "time":"2014-09-25T21:15:03.499185026Z",
  #   "kubernetes": {
  #     "namespace": "default",
  #     "pod_name": "synthetic-logger-0.25lps-pod",
  #     "container_name": "synth-lgr"
  #   },
  #   "docker": {
  #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
  #   }
  # }
  #
  # This makes it easier for users to search for logs by pod name or by
  # the name of the Kubernetes container regardless of how many times the
  # Kubernetes pod has been restarted (resulting in several Docker container IDs).
  # Json Log Example:
  # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
  # CRI Log Example:
  # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
  <source>
    @id fluentd-containers.log
    @type tail
    path /var/log/containers/*.log
    pos_file /var/log/es-containers.log.pos
    tag raw.kubernetes.*
    read_from_head true
    <parse>
      @type multi_format
      <pattern>
        format json
        time_key time
        time_format %Y-%m-%dT%H:%M:%S.%NZ
      </pattern>
      <pattern>
        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
        time_format %Y-%m-%dT%H:%M:%S.%N%:z
      </pattern>
    </parse>
  </source>
  # Detect exceptions in the log output and forward them as one log entry.
  <match raw.kubernetes.**>
    @id raw.kubernetes
    @type detect_exceptions
    remove_tag_prefix raw
    message log
    stream stream
    multiline_flush_interval 5
    max_bytes 500000
    max_lines 1000
  </match>
system.input.conf: |-
  # Example:
  # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
  <source>
    @id minion
    @type tail
    format /^(?<time>[^ ]* [^,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
    time_format %Y-%m-%d %H:%M:%S
    path /var/log/salt/minion
    pos_file /var/log/salt.pos
    tag salt
  </source>
  # Example:
  # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
  <source>
    @id etcd.log
    @type tail
    # Not parsing this, because it doesn't have anything particularly useful to
    # parse out of it (like severities).
    format none
    path /var/log/etcd.log
    pos_file /var/log/es-etcd.log.pos
    tag etcd
  </source>
  # Multi-line parsing is required for all the kube logs because very large log
  # statements, such as those that include entire object bodies, get split into
  # multiple lines by glog.
  # Example:
  # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
  <source>
    @id kube-proxy.log
    @type tail
    format multiline
    multiline_flush_interval 5s
    format_firstline /^\w\d{4}/
    format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
    time_format %m%d %H:%M:%S.%N
    path /var/log/kube-proxy.log
    pos_file /var/log/es-kube-proxy.log.pos
    tag kube-proxy
  </source>
  # Example:
  # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
  <source>
    @id kube-controller-manager.log
    @type tail
    format multiline
    multiline_flush_interval 5s
    format_firstline /^\w\d{4}/
    format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
    time_format %m%d %H:%M:%S.%N
    path /var/log/kube-controller-manager.log
    pos_file /var/log/es-kube-controller-manager.log.pos
    tag kube-controller-manager
  </source>
  # Example:
  # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
  <source>
    @id kube-scheduler.log
    @type tail
    format multiline
    multiline_flush_interval 5s
    format_firstline /^\w\d{4}/
    format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
    time_format %m%d %H:%M:%S.%N
    path /var/log/kube-scheduler.log
    pos_file /var/log/es-kube-scheduler.log.pos
    tag kube-scheduler
  </source>