From 26e7b70e4907c465081ba46775b91f683b71bf59 Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Wed, 21 Mar 2018 23:55:51 +0100 Subject: [PATCH 1/8] Added historical metrics --- unitymetrics.go | 140 +++++++++++++++++++++++++++++++----------------- 1 file changed, 90 insertions(+), 50 deletions(-) diff --git a/unitymetrics.go b/unitymetrics.go index 3c1d37c..b61822c 100644 --- a/unitymetrics.go +++ b/unitymetrics.go @@ -14,6 +14,40 @@ import ( var log = logrus.New() var unityName string +// +func parseResult(path string, valuesMap map[string]interface{}) { + + tagsMap := make(map[string]string) + tagNames := make(map[int]string) + + pathSplit := strings.Split(path, ".") + + var measurementName string + if pathSplit[0] == "kpi" { + measurementName = fmt.Sprintf("kpi_%s", pathSplit[1]) + } else { + measurementName = pathSplit[2] + } + + j := 0 + for i, v := range pathSplit { + if v == "*" { + tagName := pathSplit[i-1] + tagNames[j] = tagName + j++ + } + } + + parseMap( + 0, + &path, + &measurementName, + tagNames, + tagsMap, + valuesMap, + ) +} + // https://stackoverflow.com/questions/29366038/looping-iterate-over-the-second-level-nested-json-in-go-lang func parseMap(index int, pathPtr *string, measurementNamePtr *string, tagNames map[int]string, tagsMap map[string]string, valuesMap map[string]interface{}) { @@ -73,7 +107,13 @@ func parseMap(index int, pathPtr *string, measurementNamePtr *string, tagNames m // Formating fied set // = var field string - field = fmt.Sprintf("%s=%s", pathSplit[len(pathSplit)-1], concreteVal) + _, ok := concreteVal.(float64) + + if ok { + field = fmt.Sprintf("%s=%f", pathSplit[len(pathSplit)-1], concreteVal) + } else { + field = fmt.Sprintf("%s=%s", pathSplit[len(pathSplit)-1], concreteVal) + } // Formating and printing the result using the InfluxDB's Line Protocol // https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/ @@ -91,7 +131,8 @@ func main() { passwordPtr := flag.String("password", "", "Password") 
unityPtr := flag.String("unity", "", "Unity IP or FQDN") intervalPtr := flag.Uint64("interval", 30, "Sampling interval") - pathsPtr := flag.String("paths", "kpi.sp.spa.utilization,sp.*.cpu.summary.busyTicks", "Unity metrics paths") + rtpathsPtr := flag.String("rtpaths", "kpi.sp.spa.utilization,sp.*.cpu.summary.busyTicks", "Real time metrics paths") + histpathsPtr := flag.String("histpaths", "sp.*.cpu.summary.utilization,sp.*.storage.lun.*.responseTime", "Historical metrics paths") debugPtr := flag.Bool("debug", false, "Debug mode") flag.Parse() @@ -123,8 +164,14 @@ func main() { log.WithFields(logrus.Fields{ "event": "flag", "key": "paths", - "value": *pathsPtr, - }).Debug("Parsed flag paths") + "value": *rtpathsPtr, + }).Debug("Parsed flag real time metrics paths") + + log.WithFields(logrus.Fields{ + "event": "flag", + "key": "paths", + "value": *histpathsPtr, + }).Debug("Parsed flag historical metrics paths") // Start a new Unity session @@ -152,65 +199,58 @@ func main() { // Store the name of the Unity unityName = System.Entries[0].Content.Name - // metric paths - paths := strings.Split(*pathsPtr, ",") - - // converting metric interval into uint32 - var interval = uint32(*intervalPtr) + if *histpathsPtr != "" { - // Request a new metric query - Metric, err := session.NewMetricRealTimeQuery(paths, interval) - if err != nil { - log.Fatal(err) - } + // metric paths + histpaths := strings.Split(*histpathsPtr, ",") - // Waiting that the sampling of the metrics is done - time.Sleep(time.Duration(Metric.Content.Interval) * time.Second) + for _, p := range histpaths { - // Get the results of the query - Result, err := session.GetMetricRealTimeQueryResult(Metric.Content.ID) - if err != nil { - log.Fatal(err) - } + log.WithFields(logrus.Fields{ + "event": "historical", + "key": "paths", + "value": p, + }).Debug("Querying historical metric") - // Parse the results - for _, v := range Result.Entries { + // Request a new metric query + MetricValue, err := 
session.GetmetricValue(p) + if err != nil { + log.Error(err) + } - valuesMap := v.Content.Values.(map[string]interface{}) + parseResult(MetricValue.Entries[0].Content.Path, MetricValue.Entries[0].Content.Values.(map[string]interface{})) + } + } - tagsMap := make(map[string]string) - tagNames := make(map[int]string) + if *rtpathsPtr != "" { - path := v.Content.Path + // metric paths + rtpaths := strings.Split(*rtpathsPtr, ",") - pathSplit := strings.Split(path, ".") + // converting metric interval into uint32 + var interval = uint32(*intervalPtr) - var measurementName string - if pathSplit[0] == "kpi" { - measurementName = fmt.Sprintf("kpi_%s", pathSplit[1]) - } else { - measurementName = pathSplit[2] + // Request a new metric query + Metric, err := session.NewMetricRealTimeQuery(rtpaths, interval) + if err != nil { + log.Fatal(err) } - j := 0 - for i, v := range pathSplit { - if v == "*" { - tagName := pathSplit[i-1] - tagNames[j] = tagName - j++ - } + // Waiting that the sampling of the metrics is done + time.Sleep(time.Duration(Metric.Content.Interval) * time.Second) + + // Get the results of the query + Result, err := session.GetMetricRealTimeQueryResult(Metric.Content.ID) + if err != nil { + log.Fatal(err) } - parseMap( - 0, - &path, - &measurementName, - tagNames, - tagsMap, - valuesMap, - ) - } + // Parse the results - /* TODO: DELETE THE QUERY */ + //var v entryStruct + for _, v := range Result.Entries { + parseResult(v.Content.Path, v.Content.Values.(map[string]interface{})) + } + } } From 96ba80879d75de9b32a08ca2227459fea184ac5a Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Thu, 29 Mar 2018 21:19:46 +0200 Subject: [PATCH 2/8] Changed how timestamps are handled --- unitymetrics.go | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/unitymetrics.go b/unitymetrics.go index b61822c..1e253a3 100644 --- a/unitymetrics.go +++ b/unitymetrics.go @@ -15,7 +15,7 @@ var log = logrus.New() var unityName string // -func 
parseResult(path string, valuesMap map[string]interface{}) { +func parseResult(timestamp time.Time, path string, valuesMap map[string]interface{}) { tagsMap := make(map[string]string) tagNames := make(map[int]string) @@ -39,6 +39,7 @@ func parseResult(path string, valuesMap map[string]interface{}) { } parseMap( + timestamp, 0, &path, &measurementName, @@ -49,7 +50,7 @@ func parseResult(path string, valuesMap map[string]interface{}) { } // https://stackoverflow.com/questions/29366038/looping-iterate-over-the-second-level-nested-json-in-go-lang -func parseMap(index int, pathPtr *string, measurementNamePtr *string, tagNames map[int]string, tagsMap map[string]string, valuesMap map[string]interface{}) { +func parseMap(timestamp time.Time, index int, pathPtr *string, measurementNamePtr *string, tagNames map[int]string, tagsMap map[string]string, valuesMap map[string]interface{}) { for key, val := range valuesMap { @@ -74,6 +75,7 @@ func parseMap(index int, pathPtr *string, measurementNamePtr *string, tagNames m } parseMap( + timestamp, index, pathPtr, measurementNamePtr, @@ -117,7 +119,7 @@ func parseMap(index int, pathPtr *string, measurementNamePtr *string, tagNames m // Formating and printing the result using the InfluxDB's Line Protocol // https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/ - fmt.Printf("%s,%s %s %d\n", *measurementNamePtr, tags, field, time.Now().UnixNano()) + fmt.Printf("%s,%s %s %d\n", *measurementNamePtr, tags, field, timestamp.UnixNano()) } } } @@ -131,8 +133,8 @@ func main() { passwordPtr := flag.String("password", "", "Password") unityPtr := flag.String("unity", "", "Unity IP or FQDN") intervalPtr := flag.Uint64("interval", 30, "Sampling interval") - rtpathsPtr := flag.String("rtpaths", "kpi.sp.spa.utilization,sp.*.cpu.summary.busyTicks", "Real time metrics paths") - histpathsPtr := flag.String("histpaths", "sp.*.cpu.summary.utilization,sp.*.storage.lun.*.responseTime", "Historical metrics paths") + rtpathsPtr := 
flag.String("rtpaths", "", "Real time metrics paths") + histpathsPtr := flag.String("histpaths", "", "Historical metrics paths") debugPtr := flag.Bool("debug", false, "Debug mode") flag.Parse() @@ -215,10 +217,15 @@ func main() { // Request a new metric query MetricValue, err := session.GetmetricValue(p) if err != nil { - log.Error(err) + log.WithFields(logrus.Fields{ + "event": "historical", + "key": "paths", + "value": p, + "error": err, + }).Error("Querying historical metric") } - parseResult(MetricValue.Entries[0].Content.Path, MetricValue.Entries[0].Content.Values.(map[string]interface{})) + parseResult(MetricValue.Entries[0].Content.Timestamp, MetricValue.Entries[0].Content.Path, MetricValue.Entries[0].Content.Values.(map[string]interface{})) } } @@ -242,7 +249,11 @@ func main() { // Get the results of the query Result, err := session.GetMetricRealTimeQueryResult(Metric.Content.ID) if err != nil { - log.Fatal(err) + log.WithFields(logrus.Fields{ + "event": "realtime", + "key": "error", + "error": err, + }).Error("Querying historical metric") } // Parse the results @@ -250,7 +261,7 @@ func main() { //var v entryStruct for _, v := range Result.Entries { - parseResult(v.Content.Path, v.Content.Values.(map[string]interface{})) + parseResult(v.Content.Timestamp, v.Content.Path, v.Content.Values.(map[string]interface{})) } } } From 3a337468d276a06fcfe131a9059b01c50fd16370 Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Thu, 29 Mar 2018 21:32:49 +0200 Subject: [PATCH 3/8] Added tests before parsing results --- unitymetrics.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/unitymetrics.go b/unitymetrics.go index 1e253a3..793ef8b 100644 --- a/unitymetrics.go +++ b/unitymetrics.go @@ -223,9 +223,9 @@ func main() { "value": p, "error": err, }).Error("Querying historical metric") + } else { + parseResult(MetricValue.Entries[0].Content.Timestamp, MetricValue.Entries[0].Content.Path, 
MetricValue.Entries[0].Content.Values.(map[string]interface{})) } - - parseResult(MetricValue.Entries[0].Content.Timestamp, MetricValue.Entries[0].Content.Path, MetricValue.Entries[0].Content.Values.(map[string]interface{})) } } @@ -254,14 +254,12 @@ func main() { "key": "error", "error": err, }).Error("Querying historical metric") - } + } else { + // Parse the results + for _, v := range Result.Entries { - // Parse the results - - //var v entryStruct - for _, v := range Result.Entries { - - parseResult(v.Content.Timestamp, v.Content.Path, v.Content.Values.(map[string]interface{})) + parseResult(v.Content.Timestamp, v.Content.Path, v.Content.Values.(map[string]interface{})) + } } } } From b29333cbb567d872c8c0fcfc8720663cf0e340e3 Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Thu, 29 Mar 2018 22:39:45 +0200 Subject: [PATCH 4/8] Added pools and storage resources capacity statistics --- unitymetrics.go | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/unitymetrics.go b/unitymetrics.go index 793ef8b..5fe2da0 100644 --- a/unitymetrics.go +++ b/unitymetrics.go @@ -124,6 +124,29 @@ func parseMap(timestamp time.Time, index int, pathPtr *string, measurementNamePt } } +func parsePool(id string, name string, sizeFree uint64, sizeSubscribed uint64, sizeTotal uint64, sizeUsed uint64) { + + var tags string + + tags = fmt.Sprintf("unity=%s,pool=%s,poolname=%s", unityName, id, name) + + fmt.Printf("pool,%s sizefree=%d %d\n", tags, sizeFree, time.Now().UnixNano()) + fmt.Printf("pool,%s sizesubscribed=%d %d\n", tags, sizeSubscribed, time.Now().UnixNano()) + fmt.Printf("pool,%s sizetotal=%d %d\n", tags, sizeTotal, time.Now().UnixNano()) + fmt.Printf("pool,%s sizeused=%d %d\n", tags, sizeUsed, time.Now().UnixNano()) +} + +func parseStorageResource(id string, name string, sizeAllocated uint64, sizeTotal uint64, sizeUsed uint64) { + + var tags string + + tags = fmt.Sprintf("unity=%s,storageresource=%s,storageresourcename=%s", 
unityName, id, name) + + fmt.Printf("storageresource,%s sizeallocated=%d %d\n", tags, sizeAllocated, time.Now().UnixNano()) + fmt.Printf("storageresource,%s sizetotal=%d %d\n", tags, sizeTotal, time.Now().UnixNano()) + fmt.Printf("storageresource,%s sizeused=%d %d\n", tags, sizeUsed, time.Now().UnixNano()) +} + func main() { // Set logs parameters @@ -135,6 +158,7 @@ func main() { intervalPtr := flag.Uint64("interval", 30, "Sampling interval") rtpathsPtr := flag.String("rtpaths", "", "Real time metrics paths") histpathsPtr := flag.String("histpaths", "", "Historical metrics paths") + capacityPtr := flag.Bool("capacity", false, "Display capacity statisitcs") debugPtr := flag.Bool("debug", false, "Debug mode") flag.Parse() @@ -175,6 +199,12 @@ func main() { "value": *histpathsPtr, }).Debug("Parsed flag historical metrics paths") + log.WithFields(logrus.Fields{ + "event": "flag", + "key": "capacity", + "value": *capacityPtr, + }).Debug("Parsed flag capacity") + // Start a new Unity session log.WithFields(logrus.Fields{ @@ -201,6 +231,31 @@ func main() { // Store the name of the Unity unityName = System.Entries[0].Content.Name + if *capacityPtr { + + // Get pool informations + Pools, err := session.GetPool() + if err != nil { + log.Fatal(err) + } else { + for _, p := range Pools.Entries { + + parsePool(p.Content.ID, p.Content.Name, p.Content.SizeFree, p.Content.SizeSubscribed, p.Content.SizeTotal, p.Content.SizeUsed) + } + } + + StorageResources, err := session.GetStorageResource() + if err != nil { + log.Fatal(err) + } else { + for _, s := range StorageResources.Entries { + + parseStorageResource(s.Content.ID, s.Content.Name, s.Content.SizeAllocated, s.Content.SizeTotal, s.Content.SizeUsed) + } + } + + } + if *histpathsPtr != "" { // metric paths From 03616e18ba61a1d85e892b65c6c7d104b49910b8 Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Fri, 30 Mar 2018 06:13:48 +0200 Subject: [PATCH 5/8] Added printInflux function and refactored parsing functions --- 
unitymetrics.go | 153 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 109 insertions(+), 44 deletions(-) diff --git a/unitymetrics.go b/unitymetrics.go index 5fe2da0..9eb48bf 100644 --- a/unitymetrics.go +++ b/unitymetrics.go @@ -4,6 +4,7 @@ import ( "flag" "fmt" "os" + "strconv" "strings" "time" @@ -11,8 +12,30 @@ import ( "github.com/sirupsen/logrus" ) +// Types +type pool struct { + ID string `json:"id"` + Name string `json:"name"` + SizeFree uint64 `json:"sizeFree"` + SizeTotal uint64 `json:"sizeTotal"` + SizeUsed uint64 `json:"sizeUsed"` + SizeSubscribed uint64 `json:"sizeSubscribed"` +} + +type storageresource struct { + ID string `json:"id"` + Name string `json:"name"` + SizeAllocated uint64 `json:"sizeAllocated"` + SizeTotal uint64 `json:"sizeTotal"` + SizeUsed uint64 `json:"sizeUsed"` + Type int `json:"type"` +} + +// Variables var log = logrus.New() var unityName string +var unityPools []pool +var unityStorageResource []storageresource // func parseResult(timestamp time.Time, path string, valuesMap map[string]interface{}) { @@ -99,52 +122,87 @@ func parseMap(timestamp time.Time, index int, pathPtr *string, measurementNamePt // Formating tags set // =,= - var tags string - - tags = fmt.Sprintf("unity=%s", unityName) - for k, v := range tagsMap { - tags = tags + fmt.Sprintf(",%s=%s", k, v) - } + tagsMap["unity"] = unityName // Formating fied set // = - var field string + fieldsMap := make(map[string]string) _, ok := concreteVal.(float64) if ok { - field = fmt.Sprintf("%s=%f", pathSplit[len(pathSplit)-1], concreteVal) + fieldsMap[pathSplit[len(pathSplit)-1]] = fmt.Sprintf("%f", concreteVal) } else { - field = fmt.Sprintf("%s=%s", pathSplit[len(pathSplit)-1], concreteVal) + fieldsMap[pathSplit[len(pathSplit)-1]] = fmt.Sprintf("%s", concreteVal) } // Formating and printing the result using the InfluxDB's Line Protocol // https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/ - fmt.Printf("%s,%s %s %d\n", 
*measurementNamePtr, tags, field, timestamp.UnixNano()) + + printInflux(*measurementNamePtr, tagsMap, fieldsMap, timestamp.UnixNano()) } } } func parsePool(id string, name string, sizeFree uint64, sizeSubscribed uint64, sizeTotal uint64, sizeUsed uint64) { - var tags string + tagsMap := make(map[string]string) + fieldsMap := make(map[string]string) - tags = fmt.Sprintf("unity=%s,pool=%s,poolname=%s", unityName, id, name) + tagsMap["unity"] = unityName + tagsMap["pool"] = id + tagsMap["poolname"] = name - fmt.Printf("pool,%s sizefree=%d %d\n", tags, sizeFree, time.Now().UnixNano()) - fmt.Printf("pool,%s sizesubscribed=%d %d\n", tags, sizeSubscribed, time.Now().UnixNano()) - fmt.Printf("pool,%s sizetotal=%d %d\n", tags, sizeTotal, time.Now().UnixNano()) - fmt.Printf("pool,%s sizeused=%d %d\n", tags, sizeUsed, time.Now().UnixNano()) + fieldsMap["sizefree"] = strconv.FormatUint(sizeFree, 10) + fieldsMap["sizesubscribed"] = strconv.FormatUint(sizeSubscribed, 10) + fieldsMap["sizetotal"] = strconv.FormatUint(sizeTotal, 10) + fieldsMap["sizeused"] = strconv.FormatUint(sizeUsed, 10) + + printInflux("pool", tagsMap, fieldsMap, time.Now().UnixNano()) } func parseStorageResource(id string, name string, sizeAllocated uint64, sizeTotal uint64, sizeUsed uint64) { + tagsMap := make(map[string]string) + fieldsMap := make(map[string]string) + + tagsMap["unity"] = unityName + tagsMap["storageresource"] = id + tagsMap["storageresourcename"] = name + + fieldsMap["sizeallocated"] = strconv.FormatUint(sizeAllocated, 10) + fieldsMap["sizetotal"] = strconv.FormatUint(sizeTotal, 10) + fieldsMap["sizeused"] = strconv.FormatUint(sizeUsed, 10) + + printInflux("storageresource", tagsMap, fieldsMap, time.Now().UnixNano()) +} + +func printInflux(measurement string, tagsMap map[string]string, fieldsMap map[string]string, timestamp int64) { + + // Parse tagsMap var tags string + var i int + for k, v := range tagsMap { + if i == 0 { + tags = tags + fmt.Sprintf("%s=%s", k, v) + } else { + tags = 
tags + fmt.Sprintf(",%s=%s", k, v) + } + i++ + } - tags = fmt.Sprintf("unity=%s,storageresource=%s,storageresourcename=%s", unityName, id, name) + // Parse fieldsMap + var fields string + var j int + for k, v := range fieldsMap { + if j == 0 { + fields = fields + fmt.Sprintf("%s=%s", k, v) + } else { + fields = fields + fmt.Sprintf(",%s=%s", k, v) + } + j++ + } - fmt.Printf("storageresource,%s sizeallocated=%d %d\n", tags, sizeAllocated, time.Now().UnixNano()) - fmt.Printf("storageresource,%s sizetotal=%d %d\n", tags, sizeTotal, time.Now().UnixNano()) - fmt.Printf("storageresource,%s sizeused=%d %d\n", tags, sizeUsed, time.Now().UnixNano()) + fmt.Printf("%s,%s %s %d\n", measurement, tags, fields, timestamp) } func main() { @@ -226,34 +284,41 @@ func main() { System, err := session.GetbasicSystemInfo() if err != nil { log.Fatal(err) + } else { + // Store the name of the Unity + unityName = System.Entries[0].Content.Name } - // Store the name of the Unity - unityName = System.Entries[0].Content.Name - - if *capacityPtr { - - // Get pool informations - Pools, err := session.GetPool() - if err != nil { - log.Fatal(err) - } else { - for _, p := range Pools.Entries { - - parsePool(p.Content.ID, p.Content.Name, p.Content.SizeFree, p.Content.SizeSubscribed, p.Content.SizeTotal, p.Content.SizeUsed) - } + // Store pools informations + Pools, err := session.GetPool() + if err != nil { + log.Fatal(err) + } else { + for _, p := range Pools.Entries { + unityPools = append(unityPools, p.Content) } + } - StorageResources, err := session.GetStorageResource() - if err != nil { - log.Fatal(err) - } else { - for _, s := range StorageResources.Entries { + // Store storage resources informations + StorageResources, err := session.GetStorageResource() + if err != nil { + log.Fatal(err) + } else { + for _, s := range StorageResources.Entries { + unityStorageResource = append(unityStorageResource, s.Content) + } + } - parseStorageResource(s.Content.ID, s.Content.Name, 
s.Content.SizeAllocated, s.Content.SizeTotal, s.Content.SizeUsed) - } + if *capacityPtr { + // Parse pool info into influxdb line protocol + for _, p := range unityPools { + parsePool(p.ID, p.Name, p.SizeFree, p.SizeSubscribed, p.SizeTotal, p.SizeUsed) } + // Parse storage resources info into influxdb line protocol + for _, s := range unityStorageResource { + parseStorageResource(s.ID, s.Name, s.SizeAllocated, s.SizeTotal, s.SizeUsed) + } } if *histpathsPtr != "" { @@ -277,7 +342,7 @@ func main() { "key": "paths", "value": p, "error": err, - }).Error("Querying historical metric") + }).Error("Querying historical metric(s)") } else { parseResult(MetricValue.Entries[0].Content.Timestamp, MetricValue.Entries[0].Content.Path, MetricValue.Entries[0].Content.Values.(map[string]interface{})) } @@ -298,7 +363,7 @@ func main() { log.Fatal(err) } - // Waiting that the sampling of the metrics is done + // Waiting for the sampling of the metrics to be done time.Sleep(time.Duration(Metric.Content.Interval) * time.Second) // Get the results of the query @@ -308,7 +373,7 @@ func main() { "event": "realtime", "key": "error", "error": err, - }).Error("Querying historical metric") + }).Error("Querying real time metric(s)") } else { // Parse the results for _, v := range Result.Entries { From 99c243b478612e854ebb66c71d38824b8df00bfd Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Fri, 30 Mar 2018 16:15:01 +0200 Subject: [PATCH 6/8] Added historical kpi values query --- .gitignore | 1 + unitymetrics.go | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/.gitignore b/.gitignore index 4806ce2..61b5e3a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ unitymetrics unitymetrics.exe +telegraf.conf *.tar.gz *.zip diff --git a/unitymetrics.go b/unitymetrics.go index 9eb48bf..9b52bc5 100644 --- a/unitymetrics.go +++ b/unitymetrics.go @@ -176,6 +176,31 @@ func parseStorageResource(id string, name string, sizeAllocated uint64, 
sizeTota printInflux("storageresource", tagsMap, fieldsMap, time.Now().UnixNano()) } +func parseKpiValue(id string, name string, path string, value float64) { + + pathSplit := strings.Split(path, ".") + tagsMap := make(map[string]string) + fieldsMap := make(map[string]string) + + tagsMap["unity"] = unityName + + for i, v := range pathSplit { + if v == id { + tagName := strings.ToLower(pathSplit[i-1]) + tagsMap[tagName] = v + tagsMap[tagName+"name"] = strings.Replace(name, " ", "_", -1) + } + if v == "sp" || v == "rw" || v == "lun" { + tagsMap["lun"] = pathSplit[i+1] + } + } + + fieldsMap[pathSplit[len(pathSplit)-1]] = fmt.Sprintf("%f", value) + + printInflux("kpi_"+pathSplit[1], tagsMap, fieldsMap, time.Now().UnixNano()) +} + +// printInflux purpose is to output data in the influxdb line format func printInflux(measurement string, tagsMap map[string]string, fieldsMap map[string]string, timestamp int64) { // Parse tagsMap @@ -216,6 +241,7 @@ func main() { intervalPtr := flag.Uint64("interval", 30, "Sampling interval") rtpathsPtr := flag.String("rtpaths", "", "Real time metrics paths") histpathsPtr := flag.String("histpaths", "", "Historical metrics paths") + histkpipathsPtr := flag.String("histkpipaths", "", "Historical KPI metrics paths") capacityPtr := flag.Bool("capacity", false, "Display capacity statisitcs") debugPtr := flag.Bool("debug", false, "Debug mode") @@ -257,6 +283,12 @@ func main() { "value": *histpathsPtr, }).Debug("Parsed flag historical metrics paths") + log.WithFields(logrus.Fields{ + "event": "flag", + "key": "paths", + "value": *histkpipathsPtr, + }).Debug("Parsed flag historical KPI metrics paths") + log.WithFields(logrus.Fields{ "event": "flag", "key": "capacity", @@ -309,6 +341,29 @@ func main() { } } + if *histkpipathsPtr != "" { + // Request a new kpi query + histkpipaths := strings.Split(*histkpipathsPtr, ",") + + for _, p := range histkpipaths { + + KpiValue, err := session.GetkpiValue(p) + if err != nil { + log.WithFields(logrus.Fields{ 
+ "event": "historical", + "key": "paths", + "value": p, + "error": err, + }).Error("Querying kpi historical metric(s)") + } else { + for _, k := range KpiValue.Entries { + parseKpiValue(k.Content.ID, k.Content.Name, k.Content.Path, k.Content.Values[k.Content.EndTime]) + } + + } + } + } + if *capacityPtr { // Parse pool info into influxdb line protocol for _, p := range unityPools { From e61769f77390296d8a32aca9e8bac6d7fc7b9cea Mon Sep 17 00:00:00 2001 From: Erwan Quelin Date: Fri, 30 Mar 2018 17:10:22 +0200 Subject: [PATCH 7/8] Updated README --- README.md | 91 +++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 75 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 60f8819..40dba6a 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ # UNITYMETRICS -Unitymetrics is a tool written in Go for collecting metrics from a Dell EMC Unity array and translating them in InfluxDB's line protocol. +Unitymetrics is a tool written in Go for collecting usage and performance metrics from a Dell EMC Unity array and translating them in InfluxDB's line protocol. -It can be use to send metrics in a InfluxDB database with the help of Telegraf. +It can be useful to send metrics in a InfluxDB database with the help of Telegraf. ## How to find the available metrics -In the Unity API, metrics are define by a path. For example, if you want to collect the remaining memory available on the storage processors, you'll have to use the path `sp.*.memory.summary.freeBytes`. +In the Unity API, metrics are define by a path. For example, if you want to collect the remaining memory available on the storage processors, you'll have to use the path `sp.*.memory.summary.freeBytes`. You can find a list of the metrics [here](https://gist.github.com/equelin/37486519972f8161c480f47ae5904390). 
@@ -14,9 +14,10 @@ If you look at the different path, you will figure that some of them contains `* When there is a `*` in the path, you can use the path as-is in your request, the `*` will be automatically replaced with all the possibilities. For example, if you want to use the path `sp.*.memory.summary.freeBytes`. The API will interpret it as if you were requesting the free memory for the SPA and the SPB. If you need this information only for one of the SPs, you can use the path `sp.spa.memory.summary.freeBytes` -When there is a `+` in the path, you have to replace it with the relevant item by yourself before requesting the API. For example, if you want to retrieve the CPU utilization of the SPA, you have to modify the path `kpi.sp.+.utilization` like this `kpi.sp.spa.utilization` +When there is a `+` in the path, you can replace it with the relevant item by yourself before requesting the API or by a `*` for breaking the results by this item. For example, if you want to specifically retrieve the CPU utilization of the SPA, you have to modify the path `kpi.sp.+.utilization` like this `kpi.sp.spa.utilization`. ## How to install it + ### From prebuilt release You can find prebuilt unitymetrics binaries on the [releases page](https://github.com/equelin/unitymetrics/releases). 
@@ -53,26 +54,32 @@ You can build unitymetrics using: See usage with: -``` +```bash ./unitymetrics -h ``` -#### Run a Dell Unity metrics collection with the default metrics and a sampling interval +### Run a Dell EMC Unity metrics collection for an historical path -``` -./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword +```bash +./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword -histkpipaths kpi.sp.*.utilization ``` -#### Run a Dell Unity metrics collection with the default metrics and sampling interval of 10 seconds +### Run a Dell EMC Unity metrics collection with a real time metric and sampling interval of 10 seconds -``` -./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword -interval 10 +```bash +./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword -rtpaths sp.*.memory.summary.freeBytes -interval 10 ``` -#### Run a Dell Unity metrics collection with specific metrics and sampling interval of 10 seconds +### Run a Dell EMC Unity metrics collection with multiple metrics and sampling interval of 10 seconds +```bash +./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword -interval 10 -histkpipaths kpi.sp.*.utilization,kpi.lun.+.sp.+.rw.+.throughput,kpi.lun.*.sp.+.rw.+.throughput,kpi.lun.+.sp.+.responseTime,kpi.lun.*.sp.+.responseTime,kpi.lun.+.sp.+.queueLength,kpi.lun.*.sp.+.queueLength ``` -./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword -interval 10 -paths kpi.sp.spa.utilization,sp.*.cpu.summary.busyTicks,sp.*.cpu.uptime,sp.*.storage.pool.*.sizeFree, + +### Run a Dell EMC Unity metrics collection for collecting capacity statistics + +```bash +./unitymetrics -unity unity01.example.com -user admin -password AwesomePassword -capacity ``` ## Using unitymetrics with Telegraf @@ -93,14 +100,66 @@ Here is an example of a working telegraf's config file: [[inputs.exec]] # Shell/commands array # Full 
command line to executable with parameters, or a glob pattern to run all matching files. - commands = ["unitymetrics -user admin -password AwesomePassword -unity unity01.okcomputer.lab -interval 10 -paths kpi.sp.spa.utilization,sp.*.cpu.summary.busyTicks,sp.*.cpu.uptime,sp.*.storage.pool.*.sizeFree,sp.*.storage.pool.*.sizeSubscribed,sp.*.storage.pool.*.sizeTotal,sp.*.storage.pool.*sizeUsed,sp.*.memory.summary.totalBytes,sp.*.memory.summary.totalUsedBytes"] + commands = ["unitymetrics -user admin -password Mypassword -unity unity01.example.com -histkpipaths kpi.sp.*.utilization,kpi.lun.+.sp.+.rw.+.throughput,kpi.lun.*.sp.+.rw.+.throughput,kpi.lun.+.sp.+.responseTime,kpi.lun.*.sp.+.responseTime,kpi.lun.+.sp.+.queueLength,kpi.lun.*.sp.+.queueLength -capacity"] + + # Timeout for each command to complete. + timeout = "60s" + + # Data format to consume. + # NOTE json only reads numerical measurements, strings and booleans are ignored. + data_format = "influx" + + interval = "60s" +``` + +If needed, you can specify more than one input plugin. It might be useful if you want to gather different statistics with different intervals or if you want to query different arrays. + +```Toml +############################################################################### +# INPUT PLUGINS # +############################################################################### + +[[inputs.exec]] + # Shell/commands array + # Full command line to executable with parameters, or a glob pattern to run all matching files. + commands = ["unitymetrics -user admin -password Mypassword -unity unity01.example.com -histkpipaths kpi.sp.*.utilization,kpi.lun.+.sp.+.rw.+.throughput,kpi.lun.*.sp.+.rw.+.throughput,kpi.lun.+.sp.+.responseTime,kpi.lun.*.sp.+.responseTime,kpi.lun.+.sp.+.queueLength,kpi.lun.*.sp.+.queueLength -capacity"] + + # Timeout for each command to complete. + timeout = "60s" + + # Data format to consume. + # NOTE json only reads numerical measurements, strings and booleans are ignored. 
+ data_format = "influx" + + interval = "60s" + +[[inputs.exec]] + # Shell/commands array + # Full command line to executable with parameters, or a glob pattern to run all matching files. + commands = ["unitymetrics -user admin -password Mypassword -unity unity01.example.com -interval 50 -rtpaths sp.*.memory.summary.freeBytes,sp.*.memory.summary.totalBytes,sp.*.memory.summary.totalUsedBytes,sp.*.cpu.uptime"] - # Timeout for each command to complete. - timeout = "20s" + # Timeout for each command to complete. + timeout = "60s" # Data format to consume. # NOTE json only reads numerical measurements, strings and booleans are ignored. data_format = "influx" + + interval = "60s" + +[[inputs.exec]] + # Shell/commands array + # Full command line to executable with parameters, or a glob pattern to run all matching files. + commands = ["unitymetrics -user admin -password Mypassword -unity unity02.example.com -histkpipaths kpi.sp.*.utilization,kpi.lun.+.sp.+.rw.+.throughput,kpi.lun.*.sp.+.rw.+.throughput,kpi.lun.+.sp.+.responseTime,kpi.lun.*.sp.+.responseTime,kpi.lun.+.sp.+.queueLength,kpi.lun.*.sp.+.queueLength -capacity"] + + # Timeout for each command to complete. + timeout = "60s" + + # Data format to consume. + # NOTE json only reads numerical measurements, strings and booleans are ignored. + data_format = "influx" + + interval = "60s" ``` # Author From 2fcdb2bf62c558440604c69c2b7fdc8c4bf10616 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erwan=20Qu=C3=A9lin?= Date: Fri, 30 Mar 2018 19:35:52 +0200 Subject: [PATCH 8/8] Update README.md --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 40dba6a..9ece89f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,11 @@ Unitymetrics is a tool written in Go for collecting usage and performance metrics from a Dell EMC Unity array and translating them in InfluxDB's line protocol. -It can be useful to send metrics in a InfluxDB database with the help of Telegraf. 
+![unitymetrics-min](https://user-images.githubusercontent.com/9823778/38147007-b9abfe82-3450-11e8-8590-87d3afb7e480.png) + +It can be useful to send metrics in a InfluxDB database with the help of Telegraf and then display metrics in grafana. + +![screenshot-2018-3-30 grafana - unity](https://user-images.githubusercontent.com/9823778/38147174-60410d8c-3451-11e8-8320-016c38dbe705.png) ## How to find the available metrics