This repository has been archived by the owner on Apr 17, 2019. It is now read-only.

[nginx-ingress-controller] Allow custom health checks in upstreams #1002

Merged
merged 1 commit on May 30, 2016
2 changes: 1 addition & 1 deletion ingress/controllers/nginx/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM gcr.io/google_containers/nginx-slim:0.6
FROM gcr.io/google_containers/nginx-slim:0.7

RUN apt-get update && apt-get install -y \
diffutils \
18 changes: 17 additions & 1 deletion ingress/controllers/nginx/README.md
@@ -196,6 +196,23 @@ Use the [custom-template](examples/custom-template/README.md) example as a guide
**Please note the template is tied to the Go code. Be sure not to change the names in the variable `$cfg`.**


### Custom NGINX upstream checks

NGINX exposes some flags in the [upstream configuration](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that enable configuration of each server in the upstream. The Ingress controller allows setting custom `max_fails` and `fail_timeout` parameters in a global context, using `upstream-max-fails` or `upstream-fail-timeout` in the NGINX ConfigMap, or in a particular Ingress rule. By default these values are 0. This means NGINX will respect the `livenessProbe`, if it is defined. If there is no probe, NGINX will not mark a server inside an upstream as down.

Review comment:

s/liveness/readiness
Also this language makes me think nginx scrapes the pods for their readiness probes but that's not the case. Please clarify with something like: nginx will not health check your backends, and whenever the endpoints controller notices a readiness probe failure that pod's ip will be removed from the list of endpoints, causing nginx to also remove it from the upstreams.


To use custom values in an Ingress rule, define these annotations:

`ingress-nginx.kubernetes.io/upstream-max-fails`: number of unsuccessful attempts to communicate with the server that should happen in the duration set by the fail_timeout parameter to consider the server unavailable

`ingress-nginx.kubernetes.io/upstream-fail-timeout`: time in seconds during which the specified number of unsuccessful attempts to communicate with the server should happen to consider the server unavailable. Also the period of time the server will be considered unavailable.

**Important:**
The upstreams are shared, i.e. Ingress rules using the same service will use the same upstream.
This means only one of the rules should define annotations to configure the upstream servers.


Please check the [custom upstream check](examples/custom-upstream-check/README.md) example.
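For illustration only, here is a minimal Go sketch of how these values are resolved, using the `healthcheck.ParseAnnotations` helper added in this change; the `NginxConfiguration` field names (`UpstreamMaxFails`, `UpstreamFailTimeout`) follow their use in the controller code, and the snippet is a sketch rather than part of the controller:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"

	"k8s.io/contrib/ingress/controllers/nginx/healthcheck"
	"k8s.io/contrib/ingress/controllers/nginx/nginx"
)

func main() {
	// Global defaults, normally read from the upstream-max-fails and
	// upstream-fail-timeout keys of the NGINX ConfigMap (0 by default).
	cfg := nginx.NginxConfiguration{UpstreamMaxFails: 0, UpstreamFailTimeout: 0}

	// Ingress rule that overrides only fail-timeout through an annotation.
	ing := &extensions.Ingress{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{
				"ingress-nginx.kubernetes.io/upstream-fail-timeout": "30",
			},
		},
	}

	// max_fails keeps the global default (0); fail_timeout becomes 30.
	hz := healthcheck.ParseAnnotations(cfg, ing)
	fmt.Printf("max_fails=%d fail_timeout=%d\n", hz.MaxFails, hz.FailTimeout)
}
```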


### NGINX status page

@@ -209,7 +226,6 @@ Please check the example `example/rc-default.yaml`
To extract the information in JSON format the module provides a custom URL: `/nginx_status/format/json`



### Custom errors

In case of an error in a request the body of the response is obtained from the `default backend`. Each request to the default backend includes two headers:
33 changes: 21 additions & 12 deletions ingress/controllers/nginx/controller.go
@@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/watch"

"k8s.io/contrib/ingress/controllers/nginx/healthcheck"
"k8s.io/contrib/ingress/controllers/nginx/nginx"
)

@@ -327,9 +328,6 @@ func (lbc *loadBalancerController) sync(key string) {
return
}

ings := lbc.ingLister.Store.List()
upstreams, servers := lbc.getUpstreamServers(ings)

var cfg *api.ConfigMap

ns, name, _ := parseNsName(lbc.nxgConfigMap)
@@ -339,6 +337,10 @@ func (lbc *loadBalancerController) sync(key string) {
}

ngxConfig := lbc.nginx.ReadConfig(cfg)

ings := lbc.ingLister.Store.List()
upstreams, servers := lbc.getUpstreamServers(ngxConfig, ings)

lbc.nginx.CheckAndReload(ngxConfig, nginx.IngressConfig{
Upstreams: upstreams,
Servers: servers,
@@ -489,15 +491,15 @@ func (lbc *loadBalancerController) getStreamServices(data map[string]string, pro
if err != nil {
for _, sp := range svc.Spec.Ports {
if sp.Name == svcPort {
endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
endps = lbc.getEndpoints(svc, sp.TargetPort, proto, &healthcheck.Upstream{})
break
}
}
} else {
// we need to use the TargetPort (where the endpoints are running)
for _, sp := range svc.Spec.Ports {
if sp.Port == int32(targetPort) {
endps = lbc.getEndpoints(svc, sp.TargetPort, proto)
endps = lbc.getEndpoints(svc, sp.TargetPort, proto, &healthcheck.Upstream{})
break
}
}
@@ -542,7 +544,7 @@ func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream {

svc := svcObj.(*api.Service)

endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort, api.ProtocolTCP)
endps := lbc.getEndpoints(svc, svc.Spec.Ports[0].TargetPort, api.ProtocolTCP, &healthcheck.Upstream{})
if len(endps) == 0 {
glog.Warningf("service %v does no have any active endpoints", svcKey)
upstream.Backends = append(upstream.Backends, nginx.NewDefaultServer())
@@ -553,8 +555,8 @@ func (lbc *loadBalancerController) getDefaultUpstream() *nginx.Upstream {
return upstream
}

func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
upstreams := lbc.createUpstreams(data)
func (lbc *loadBalancerController) getUpstreamServers(ngxCfg nginx.NginxConfiguration, data []interface{}) ([]*nginx.Upstream, []*nginx.Server) {
upstreams := lbc.createUpstreams(ngxCfg, data)
upstreams[defUpstreamName] = lbc.getDefaultUpstream()

servers := lbc.createServers(data)
@@ -655,12 +657,14 @@ func (lbc *loadBalancerController) getUpstreamServers(data []interface{}) ([]*ng

// createUpstreams creates the NGINX upstreams for each service referenced in
// Ingress rules. The servers inside the upstream are endpoints.
func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[string]*nginx.Upstream {
func (lbc *loadBalancerController) createUpstreams(ngxCfg nginx.NginxConfiguration, data []interface{}) map[string]*nginx.Upstream {
upstreams := make(map[string]*nginx.Upstream)

for _, ingIf := range data {
ing := ingIf.(*extensions.Ingress)

hz := healthcheck.ParseAnnotations(ngxCfg, ing)

for _, rule := range ing.Spec.Rules {
if rule.IngressRuleValue.HTTP == nil {
continue
@@ -693,7 +697,7 @@ func (lbc *loadBalancerController) createUpstreams(data []interface{}) map[strin
for _, servicePort := range svc.Spec.Ports {
// targetPort could be a string, use the name or the port (int)
if strconv.Itoa(int(servicePort.Port)) == bp || servicePort.TargetPort.String() == bp || servicePort.Name == bp {
endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP)
endps := lbc.getEndpoints(svc, servicePort.TargetPort, api.ProtocolTCP, hz)
if len(endps) == 0 {
glog.Warningf("service %v does no have any active endpoints", svcKey)
}
@@ -801,7 +805,7 @@ func (lbc *loadBalancerController) getPemsFromIngress(data []interface{}) map[st
}

// getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination.
func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol) []nginx.UpstreamServer {
func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort intstr.IntOrString, proto api.Protocol, hz *healthcheck.Upstream) []nginx.UpstreamServer {
glog.V(3).Infof("getting endpoints for service %v/%v and port %v", s.Namespace, s.Name, servicePort.String())
ep, err := lbc.endpLister.GetServiceEndpoints(s)
if err != nil {
@@ -859,7 +863,12 @@ func (lbc *loadBalancerController) getEndpoints(s *api.Service, servicePort ints
}

for _, epAddress := range ss.Addresses {
ups := nginx.UpstreamServer{Address: epAddress.IP, Port: fmt.Sprintf("%v", targetPort)}
ups := nginx.UpstreamServer{
Address: epAddress.IP,
Port: fmt.Sprintf("%v", targetPort),
MaxFails: hz.MaxFails,
FailTimeout: hz.FailTimeout,
}
upsServers = append(upsServers, ups)
}
}
45 changes: 45 additions & 0 deletions ingress/controllers/nginx/examples/custom-upstream-check/README.md
@@ -0,0 +1,45 @@

This example shows how it is possible to create a custom configuration for a particular upstream associated with an Ingress rule.

echo "
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: echoheaders
annotations:
ingress-nginx.kubernetes.io/upstream-fail-timeout: "30"
spec:
rules:
- host: foo.bar.com
http:
paths:
- path: /
backend:
serviceName: echoheaders
servicePort: 80
" | kubectl create -f -


Check that the annotation is present in the Ingress rule:
```
kubectl get ingress echoheaders -o yaml
```
Check that the NGINX configuration was updated, using kubectl or the status page:
```
$ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf
```
```
....
upstream default-echoheaders-x-80 {
least_conn;
server 10.2.92.2:8080 max_fails=5 fail_timeout=30;

}
....
```
![nginx-module-vts](contrib/ingress/controllers/nginx/examples/custom-upstream-check/custom-upstream.png "screenshot with custom configuration")
101 changes: 101 additions & 0 deletions ingress/controllers/nginx/healthcheck/main.go
@@ -0,0 +1,101 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
"errors"
"strconv"

"k8s.io/kubernetes/pkg/apis/extensions"

"k8s.io/contrib/ingress/controllers/nginx/nginx"
)

const (
upsMaxFails = "ingress-nginx.kubernetes.io/upstream-max-fails"
upsFailTimeout = "ingress-nginx.kubernetes.io/upstream-fail-timeout"
)

var (
// ErrMissingMaxFails is returned when the Ingress does not contain the
// max-fails annotation
ErrMissingMaxFails = errors.New("max-fails annotation is missing")

// ErrMissingFailTimeout is returned when the Ingress does not contain
// the fail-timeout annotation
ErrMissingFailTimeout = errors.New("fail-timeout annotation is missing")

// ErrInvalidNumber is returned when the annotation value is not a number
ErrInvalidNumber = errors.New("the annotation does not contain a number")
)

// Upstream describes the NGINX max_fails and fail_timeout parameters to
// apply to the servers of an upstream
type Upstream struct {
MaxFails int
FailTimeout int
}

type ingAnnotations map[string]string

func (a ingAnnotations) maxFails() (int, error) {
val, ok := a[upsMaxFails]
if !ok {
return 0, ErrMissingMaxFails
}

mf, err := strconv.Atoi(val)
if err != nil {
return 0, ErrInvalidNumber
}

return mf, nil
}

func (a ingAnnotations) failTimeout() (int, error) {
val, ok := a[upsFailTimeout]
if !ok {
return 0, ErrMissingFailTimeout
}

ft, err := strconv.Atoi(val)
if err != nil {
return 0, ErrInvalidNumber
}

return ft, nil
}

// ParseAnnotations parses the annotations contained in the ingress
// rule used to configure upstream check parameters
func ParseAnnotations(cfg nginx.NginxConfiguration, ing *extensions.Ingress) *Upstream {
if ing.GetAnnotations() == nil {
return &Upstream{cfg.UpstreamMaxFails, cfg.UpstreamFailTimeout}
}

mf, err := ingAnnotations(ing.GetAnnotations()).maxFails()
if err != nil {
mf = cfg.UpstreamMaxFails
}

ft, err := ingAnnotations(ing.GetAnnotations()).failTimeout()
if err != nil {
ft = cfg.UpstreamFailTimeout
}

return &Upstream{mf, ft}
}
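
Not part of this change, but as a hedged sketch of how the fallback behavior could be exercised in a unit test for this package (it relies on the unexported `upsMaxFails` constant and the `NginxConfiguration` fields exactly as they appear in this diff):

```go
package healthcheck

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"

	"k8s.io/contrib/ingress/controllers/nginx/nginx"
)

func TestParseAnnotations(t *testing.T) {
	cfg := nginx.NginxConfiguration{UpstreamMaxFails: 2, UpstreamFailTimeout: 3}

	// No annotations: both values fall back to the ConfigMap defaults.
	ing := &extensions.Ingress{}
	if up := ParseAnnotations(cfg, ing); up.MaxFails != 2 || up.FailTimeout != 3 {
		t.Errorf("expected config defaults, got %+v", up)
	}

	// A valid max-fails annotation overrides only that field.
	ing.ObjectMeta = api.ObjectMeta{
		Annotations: map[string]string{upsMaxFails: "5"},
	}
	if up := ParseAnnotations(cfg, ing); up.MaxFails != 5 || up.FailTimeout != 3 {
		t.Errorf("expected max_fails=5, fail_timeout=3, got %+v", up)
	}

	// An invalid value also falls back to the ConfigMap default.
	ing.ObjectMeta.Annotations[upsMaxFails] = "not-a-number"
	if up := ParseAnnotations(cfg, ing); up.MaxFails != 2 {
		t.Errorf("expected fallback to max_fails=2, got %+v", up)
	}
}
```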