Merge pull request #130 from weaveworks/metrics-refactor
Refactor the metrics observer
stefanprodan authored Apr 2, 2019
2 parents b945b37 + 3e43963 commit ff7c0a1
Showing 15 changed files with 520 additions and 315 deletions.
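
For orientation, here is a rough sketch of the pkg/metrics surface implied by the diffs below. Everything in it is inferred from the call sites in this commit; the field names, exact signatures, and stub bodies are assumptions, not the actual source.

// Sketch only: the pkg/metrics API as inferred from the call sites in this
// commit. Signatures and bodies are assumptions; the real query logic that
// moved out of pkg/controller/observer.go is elided.
package metrics

import "time"

// Observer wraps the metrics server address and runs the Prometheus queries
// previously issued through controller.CanaryObserver.
type Observer struct {
	metricsServer string
}

func NewObserver(metricsServer string) Observer {
	return Observer{metricsServer: metricsServer}
}

// GetMetricsServer exposes the address so callers no longer reach into the
// struct field directly.
func (o Observer) GetMetricsServer() string {
	return o.metricsServer
}

// GetIstioSuccessRate (formerly GetDeploymentCounter) returns the HTTP
// success-rate percentage for a workload over the given interval.
func (o Observer) GetIstioSuccessRate(name, namespace, metric, interval string) (float64, error) {
	return 0, nil // query construction elided in this sketch
}

// GetIstioRequestDuration (formerly GetDeploymentHistogram) returns the
// request-duration quantile for a workload over the given interval.
func (o Observer) GetIstioRequestDuration(name, namespace, metric, interval string) (time.Duration, error) {
	return 0, nil // query construction elided in this sketch
}

// Recorder (formerly CanaryRecorder) registers and updates the controller's
// own Prometheus metrics.
type Recorder struct{}

func NewRecorder(controller string, register bool) Recorder { return Recorder{} }

func (r Recorder) SetInfo(version, meshProvider string) {}

// CheckMetricsServer reports whether the metrics server is reachable
// (moved here from the controller package).
func CheckMetricsServer(address string) (bool, error) {
	return true, nil // reachability check elided in this sketch
}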
3 changes: 2 additions & 1 deletion cmd/flagger/main.go
@@ -7,6 +7,7 @@ import (
 	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
 	"github.com/weaveworks/flagger/pkg/controller"
 	"github.com/weaveworks/flagger/pkg/logging"
+	"github.com/weaveworks/flagger/pkg/metrics"
 	"github.com/weaveworks/flagger/pkg/notifier"
 	"github.com/weaveworks/flagger/pkg/server"
 	"github.com/weaveworks/flagger/pkg/signals"
@@ -105,7 +106,7 @@ func main() {
 		logger.Infof("Watching namespace %s", namespace)
 	}
 
-	ok, err := controller.CheckMetricsServer(metricsServer)
+	ok, err := metrics.CheckMetricsServer(metricsServer)
 	if ok {
 		logger.Infof("Connected to metrics server %s", metricsServer)
 	} else {
12 changes: 4 additions & 8 deletions pkg/controller/controller.go
@@ -42,8 +42,8 @@ type Controller struct {
 	canaries      *sync.Map
 	jobs          map[string]CanaryJob
 	deployer      CanaryDeployer
-	observer      CanaryObserver
-	recorder      metrics.CanaryRecorder
+	observer      metrics.Observer
+	recorder      metrics.Recorder
 	notifier      *notifier.Slack
 	meshProvider  string
 }
@@ -81,11 +81,7 @@ func NewController(
 		},
 	}
 
-	observer := CanaryObserver{
-		metricsServer: metricServer,
-	}
-
-	recorder := metrics.NewCanaryRecorder(controllerAgentName, true)
+	recorder := metrics.NewRecorder(controllerAgentName, true)
 	recorder.SetInfo(version, meshProvider)
 
 	ctrl := &Controller{
@@ -101,7 +97,7 @@
 		jobs:          map[string]CanaryJob{},
 		flaggerWindow: flaggerWindow,
 		deployer:      deployer,
-		observer:      observer,
+		observer:      metrics.NewObserver(metricServer),
 		recorder:      recorder,
 		notifier:      notifier,
 		meshProvider:  meshProvider,
8 changes: 3 additions & 5 deletions pkg/controller/controller_test.go
@@ -35,7 +35,7 @@ type Mocks struct {
 	meshClient    clientset.Interface
 	flaggerClient clientset.Interface
 	deployer      CanaryDeployer
-	observer      CanaryObserver
+	observer      metrics.Observer
 	ctrl          *Controller
 	logger        *zap.SugaredLogger
 	router        router.Interface
@@ -74,9 +74,7 @@ func SetupMocks(abtest bool) Mocks {
 			flaggerClient: flaggerClient,
 		},
 	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
+	observer := metrics.NewObserver("fake")
 
 	// init controller
 	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
@@ -95,7 +93,7 @@
 		flaggerWindow: time.Second,
 		deployer:      deployer,
 		observer:      observer,
-		recorder:      metrics.NewCanaryRecorder(controllerAgentName, false),
+		recorder:      metrics.NewRecorder(controllerAgentName, false),
 	}
 	ctrl.flaggerSynced = alwaysReady
 
257 changes: 0 additions & 257 deletions pkg/controller/observer.go

This file was deleted.

12 changes: 6 additions & 6 deletions pkg/controller/scheduler.go
@@ -501,7 +501,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
 					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
 						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
 				} else {
-					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
 				}
 				return false
 			}
@@ -513,13 +513,13 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
 		}
 
 		if metric.Name == "istio_requests_total" {
-			val, err := c.observer.GetDeploymentCounter(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
+			val, err := c.observer.GetIstioSuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
 			if err != nil {
 				if strings.Contains(err.Error(), "no values found") {
 					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
 						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
 				} else {
-					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
 				}
 				return false
 			}
@@ -531,9 +531,9 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
 		}
 
 		if metric.Name == "istio_request_duration_seconds_bucket" {
-			val, err := c.observer.GetDeploymentHistogram(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
+			val, err := c.observer.GetIstioRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
 			if err != nil {
-				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
 				return false
 			}
 			t := time.Duration(metric.Threshold) * time.Millisecond
@@ -551,7 +551,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
 					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
 						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
 				} else {
-					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
 				}
 				return false
 			}
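Pulling the scheduler.go call sites above together, here is a minimal standalone usage sketch of the new accessor-based observer API. The server address, workload names, and interval are hypothetical; only the function names and call shapes come from this diff.

package main

import (
	"log"

	"github.com/weaveworks/flagger/pkg/metrics"
)

func main() {
	// Hypothetical address; the controller wires this from its metrics server flag.
	observer := metrics.NewObserver("http://prometheus.istio-system:9090")

	rate, err := observer.GetIstioSuccessRate("podinfo", "test", "istio_requests_total", "1m")
	if err != nil {
		// The server address now comes from the accessor, not a controller-package field.
		log.Fatalf("Metrics server %s query failed: %v", observer.GetMetricsServer(), err)
	}
	log.Printf("success rate: %.2f%%", rate)
}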
(Diffs for the remaining 10 changed files are not shown.)
