diff --git a/incubator/fluentd/Chart.yaml b/incubator/fluentd/Chart.yaml
index c2b0cd51abb6..1341e9290149 100644
--- a/incubator/fluentd/Chart.yaml
+++ b/incubator/fluentd/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
description: A Fluentd Elasticsearch Helm chart for Kubernetes.
icon: https://raw.githubusercontent.com/fluent/fluentd-docs/master/public/logo/Fluentd_square.png
name: fluentd
-version: 0.1.4
+version: 0.2.0
appVersion: 0.12
home: https://www.fluentd.org/
sources:
diff --git a/incubator/fluentd/README.md b/incubator/fluentd/README.md
new file mode 100644
index 000000000000..b2a3a8025525
--- /dev/null
+++ b/incubator/fluentd/README.md
@@ -0,0 +1,490 @@
+# Fluentd
+
+Helm chart to run [fluentd](https://www.fluentd.org/) on kubernetes.
+
+## Use cases
+
+We tried to write a chart flexible enough to be used in all possible configurations.
+
+### Listen port and save logs to Elastic Search
+
+This configuration is the **default**, so we provide a `quick start` and a `full config` example to install.
+
+#### Quick start
+`helm install --name logs incubator/fluentd`
+Now you can forward logs to port `24220` and they will be saved to the Elasticsearch host `elasticsearch-client.default.svc.cluster.local`.
+
+#### Full config
+
+Create a `values.yaml` file containing these values (replace ${%VARS%} with values for your infrastructure)
+
+```yaml
+image:
+ repository: gcr.io/google-containers/fluentd-elasticsearch
+ tag: v2.0.4
+
+env:
+ open:
+ OUTPUT_HOST: ${ELASTIC_SEARCH_HOST}
+ OUTPUT_PORT: ${ELASTIC_SEARCH_PORT}
+ OUTPUT_BUFFER_CHUNK_LIMIT: 2M
+ OUTPUT_BUFFER_QUEUE_LIMIT: 8
+
+configDir: /etc/fluent/config.d
+configMap:
+  general.conf: |
+    <match fluent.**>
+      @type null
+    </match>
+
+
+  forward-input.conf: |
+
+  output.conf: |
+    <match **>
+      @id elasticsearch
+      @type elasticsearch
+      @log_level info
+      include_tag_key true
+      # Replace with the host/port to your Elasticsearch cluster.
+      host "#{ENV['OUTPUT_HOST']}"
+      port "#{ENV['OUTPUT_PORT']}"
+      logstash_format true
+      <buffer>
+        @type file
+        path /var/log/fluentd-buffers/kubernetes.system.buffer
+        flush_mode interval
+        retry_type exponential_backoff
+        flush_thread_count 2
+        flush_interval 5s
+        retry_forever
+        retry_max_interval 30
+        chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
+        queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
+        overflow_action block
+      </buffer>
+    </match>
+
+service:
+ type: ClusterIP
+ ports:
+ - name: "monitor-agent"
+ protocol: TCP
+ externalPort: ${FLUENTD_PORT}
+ containerPort: ${FLUENTD_PORT}
+```
+
+`helm install --name logs incubator/fluentd -f values.yaml`
+
+
+## Read k8s logs and forward to Elastic Search
+
+This installation can be a replacement for [fluentd-elasticsearch](https://github.com/kubernetes/charts/tree/master/incubator/fluentd-elasticsearch)
+
+
+### Use fluentd official containers for kubernetes
+
+Create a `values.yaml` file containing these values (replace ${%VARS%} with values for your infrastructure)
+
+```yaml
+image:
+ repository: fluent/fluentd-kubernetes-daemonset
+ tag: v0.12.33-elasticsearch
+env:
+ open:
+ FLUENT_ELASTICSEARCH_HOST: ${FLUENT_ELASTICSEARCH_HOST}
+ FLUENT_ELASTICSEARCH_PORT: ${FLUENT_ELASTICSEARCH_PORT}
+ secret:
+ FLUENT_ELASTICSEARCH_USER: ${FLUENT_ELASTICSEARCH_USER}
+ FLUENT_ELASTICSEARCH_PASSWORD: ${FLUENT_ELASTICSEARCH_PASSWORD}
+## Mount fluentd configs from the chart into a tmp dir, because all required configs are already built into the image
+## https://github.com/fluent/fluentd-kubernetes-daemonset/tree/master/docker-image/v0.12/alpine-elasticsearch/conf
+configDir: /tmp/conf
+```
+
+`helm install --name logs incubator/fluentd -f values.yaml`
+
+### Use custom configs
+
+This is the approach used in [fluentd-elasticsearch](https://github.com/kubernetes/charts/tree/master/incubator/fluentd-elasticsearch)
+
+Create a `values.yaml` file containing these values (replace ${%VARS%} with values for your infrastructure)
+
+```yaml
+image:
+ repository: gcr.io/google-containers/fluentd-elasticsearch
+ tag: v2.0.4
+
+env:
+ open:
+ OUTPUT_HOST: ${ELASTIC_SEARCH_HOST}
+ OUTPUT_PORT: ${ELASTIC_SEARCH_PORT}
+ OUTPUT_BUFFER_CHUNK_LIMIT: 2M
+ OUTPUT_BUFFER_QUEUE_LIMIT: 8
+
+configDir: /etc/fluent/config.d
+configMap:
+  system.conf: |-
+    <system>
+      root_dir /tmp/fluentd-buffers/
+    </system>
+  containers.input.conf: |-
+
+    # Detect exceptions in the log output and forward them as one log entry.
+    <match raw.kubernetes.**>
+      @id raw.kubernetes
+      @type detect_exceptions
+      remove_tag_prefix raw
+      message log
+      stream stream
+      multiline_flush_interval 5
+      max_bytes 500000
+      max_lines 1000
+    </match>
+ system.input.conf: |-
+ # Example:
+ # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
+
+ # Example:
+ # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
+
+ # Examples:
+ # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
+ # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+
+ # Example:
+ # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
+
+ # Multi-line parsing is required for all the kube logs because very large log
+ # statements, such as those that include entire object bodies, get split into
+ # multiple lines by glog.
+ # Example:
+ # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
+