diff --git a/incubator/fluentd/Chart.yaml b/incubator/fluentd/Chart.yaml
index c2b0cd51abb6..1341e9290149 100644
--- a/incubator/fluentd/Chart.yaml
+++ b/incubator/fluentd/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
 description: A Fluentd Elasticsearch Helm chart for Kubernetes.
 icon: https://raw.githubusercontent.com/fluent/fluentd-docs/master/public/logo/Fluentd_square.png
 name: fluentd
-version: 0.1.4
+version: 0.2.0
 appVersion: 0.12
 home: https://www.fluentd.org/
 sources:
diff --git a/incubator/fluentd/README.md b/incubator/fluentd/README.md
new file mode 100644
index 000000000000..b2a3a8025525
--- /dev/null
+++ b/incubator/fluentd/README.md
@@ -0,0 +1,490 @@
+# Fluentd
+
+Helm chart to run [fluentd](https://www.fluentd.org/) on Kubernetes.
+
+## Use cases
+
+We tried to make this chart flexible enough to support as many configurations as possible.
+
+### Listen on a port and save logs to Elasticsearch
+
+This is the **default** configuration, so we provide both a `quick start` and a `full config` installation example.
+
+#### Quick start
+
+`helm install --name logs incubator/fluentd`
+
+Now you can forward logs to the forward input on port `24224` and they will be saved to the Elasticsearch cluster at `elasticsearch-client.default.svc.cluster.local`.
+
+#### Full config
+
+Create a `values.yaml` file containing the following values (replace `${%VARS%}` placeholders with values for your infrastructure):
+
+```yaml
+image:
+  repository: gcr.io/google-containers/fluentd-elasticsearch
+  tag: v2.0.4
+
+env:
+  open:
+    OUTPUT_HOST: ${ELASTIC_SEARCH_HOST}
+    OUTPUT_PORT: ${ELASTIC_SEARCH_PORT}
+    OUTPUT_BUFFER_CHUNK_LIMIT: 2M
+    OUTPUT_BUFFER_QUEUE_LIMIT: 8
+
+configDir: /etc/fluent/config.d
+configMap:
+  general.conf: |
+    <match fluent.**>
+      @type null
+    </match>
+
+    <source>
+      @type monitor_agent
+      bind 0.0.0.0
+      port ${FLUENTD_PORT}
+      tag fluentd.monitor.metrics
+    </source>
+  forward-input.conf: |
+    <source>
+      @type forward
+      port 24224
+      bind 0.0.0.0
+    </source>
+  output.conf: |
+    <match **>
+      @id elasticsearch
+      @type elasticsearch
+      @log_level info
+      include_tag_key true
+      # Replace with the host/port of your Elasticsearch cluster.
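+      # Note: double-quoted "#{ENV['...']}" values are embedded Ruby, resolved by
+      # fluentd at startup from the container environment (the chart passes the
+      # `env.open` entries above to the container as environment variables).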
+      host "#{ENV['OUTPUT_HOST']}"
+      port "#{ENV['OUTPUT_PORT']}"
+      logstash_format true
+      <buffer>
+        @type file
+        path /var/log/fluentd-buffers/kubernetes.system.buffer
+        flush_mode interval
+        retry_type exponential_backoff
+        flush_thread_count 2
+        flush_interval 5s
+        retry_forever
+        retry_max_interval 30
+        chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
+        queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
+        overflow_action block
+      </buffer>
+    </match>
+
+service:
+  type: ClusterIP
+  ports:
+    - name: "monitor-agent"
+      protocol: TCP
+      externalPort: ${FLUENTD_PORT}
+      containerPort: ${FLUENTD_PORT}
+```
+
+`helm install --name logs incubator/fluentd -f values.yaml`
+
+## Read k8s logs and forward to Elasticsearch
+
+This installation can be a replacement for [fluentd-elasticsearch](https://github.com/kubernetes/charts/tree/master/incubator/fluentd-elasticsearch).
+
+### Use the official fluentd containers for Kubernetes
+
+Create a `values.yaml` file containing the following values (replace `${%VARS%}` placeholders with values for your infrastructure):
+
+```yaml
+image:
+  repository: fluent/fluentd-kubernetes-daemonset
+  tag: v0.12.33-elasticsearch
+env:
+  open:
+    FLUENT_ELASTICSEARCH_HOST: ${FLUENT_ELASTICSEARCH_HOST}
+    FLUENT_ELASTICSEARCH_PORT: ${FLUENT_ELASTICSEARCH_PORT}
+  secret:
+    FLUENT_ELASTICSEARCH_USER: ${FLUENT_ELASTICSEARCH_USER}
+    FLUENT_ELASTICSEARCH_PASSWORD: ${FLUENT_ELASTICSEARCH_PASSWORD}
+## Mount the chart's fluentd configs into a tmp dir, because all required configs are already built into the image:
+## https://github.com/fluent/fluentd-kubernetes-daemonset/tree/master/docker-image/v0.12/alpine-elasticsearch/conf
+configDir: /tmp/conf
+```
+
+`helm install --name logs incubator/fluentd -f values.yaml`
+
+### Use custom configs
+
+This is the approach used in [fluentd-elasticsearch](https://github.com/kubernetes/charts/tree/master/incubator/fluentd-elasticsearch).
+
+Create a `values.yaml` file containing the following values (replace `${%VARS%}` placeholders with values for your infrastructure):
+
+```yaml
+image:
+  repository: gcr.io/google-containers/fluentd-elasticsearch
+  tag: v2.0.4
+
+env:
+  open:
+    OUTPUT_HOST: ${ELASTIC_SEARCH_HOST}
+    OUTPUT_PORT: ${ELASTIC_SEARCH_PORT}
+    OUTPUT_BUFFER_CHUNK_LIMIT: 2M
+    OUTPUT_BUFFER_QUEUE_LIMIT: 8
+
+configDir: /etc/fluent/config.d
+configMap:
+  system.conf: |-
+    <system>
+      root_dir /tmp/fluentd-buffers/
+    </system>
+  containers.input.conf: |-
+    <source>
+      @id fluentd-containers.log
+      @type tail
+      path /var/log/containers/*.log
+      pos_file /var/log/fluentd-containers.log.pos
+      time_format %Y-%m-%dT%H:%M:%S.%NZ
+      tag raw.kubernetes.*
+      format json
+      read_from_head true
+    </source>
+    # Detect exceptions in the log output and forward them as one log entry.
+    <match raw.kubernetes.**>
+      @id raw.kubernetes
+      @type detect_exceptions
+      remove_tag_prefix raw
+      message log
+      stream stream
+      multiline_flush_interval 5
+      max_bytes 500000
+      max_lines 1000
+    </match>
+  system.input.conf: |-
+    # Example:
+    # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
+    <source>
+      @id minion
+      @type tail
+      format /^(?
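
With the default (forward input) configuration installed, other fluentd instances can ship logs into the release through port `24224`. A minimal client-side sketch of such a `forward` output, assuming a release named `logs` whose service is reachable as `logs-fluentd.default.svc.cluster.local` and exposes the forward port via `service.ports` (check `kubectl get svc` for the name and ports your release actually creates):

```
<match **>
  @type forward
  <server>
    # Assumed service DNS name for a release called "logs"; adjust to the
    # service and port your release exposes.
    host logs-fluentd.default.svc.cluster.local
    port 24224
  </server>
</match>
```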