diff --git a/.chloggen/add_inspect_metrics.yaml b/.chloggen/add_inspect_metrics.yaml new file mode 100755 index 000000000000..d71e07ed981d --- /dev/null +++ b/.chloggen/add_inspect_metrics.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: dockerstatsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add `container.cpu.limit`, `container.cpu.shares` and `container.restarts` metrics, sourced from the Docker container API" + +# One or more tracking issues related to the change +issues: [21087] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: It requires Docker API version 1.25 or greater. diff --git a/receiver/dockerstatsreceiver/config_test.go b/receiver/dockerstatsreceiver/config_test.go index d043aa513a5e..abc407d3d587 100644 --- a/receiver/dockerstatsreceiver/config_test.go +++ b/receiver/dockerstatsreceiver/config_test.go @@ -40,7 +40,7 @@ func TestLoadConfig(t *testing.T) { }, Endpoint: "http://example.com/", - DockerAPIVersion: 1.24, + DockerAPIVersion: 1.25, ExcludedImages: []string{ "undesired-container", @@ -95,5 +95,5 @@ func TestValidateErrors(t *testing.T) { assert.Equal(t, "endpoint must be specified", component.ValidateConfig(cfg).Error()) cfg = &Config{ScraperControllerSettings: scraperhelper.ScraperControllerSettings{CollectionInterval: 1 * time.Second}, Endpoint: "someEndpoint", DockerAPIVersion: 1.21} - assert.Equal(t, "api_version must be at least 1.22", component.ValidateConfig(cfg).Error()) + assert.Equal(t, "api_version must be at least 1.25", component.ValidateConfig(cfg).Error()) } diff --git a/receiver/dockerstatsreceiver/documentation.md b/receiver/dockerstatsreceiver/documentation.md index c2ce8dbeb4d7..abfa0fd6bc10 100644 --- a/receiver/dockerstatsreceiver/documentation.md +++ b/receiver/dockerstatsreceiver/documentation.md @@ -296,6 +296,24 @@ Number of sectors transferred to/from disk by the group and descendant groups (O | device_minor | Device major number for block IO operations. | Any Str | | operation | Type of BlockIO operation. | Any Str | +### container.cpu.limit + +CPU limit set for the container. + +This metric is only reported if the container has limits set with `--cpus`, `--cpuset-cpus` or `--cpu-quota`. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {cpus} | Gauge | Double | + +### container.cpu.shares + +CPU shares set for the container. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + ### container.cpu.throttling_data.periods Number of periods with throttling active. @@ -698,6 +716,14 @@ It requires docker API 1.23 or higher and kernel version >= 4.3 with pids cgroup | ---- | ----------- | ---------- | ----------------------- | --------- | | {pids} | Sum | Int | Cumulative | false | +### container.restarts + +Number of restarts for the container. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {restarts} | Sum | Int | Cumulative | true | + ### container.uptime Time elapsed since container start time.
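All three new metrics are disabled by default, so a collector configuration has to opt in explicitly. Below is a minimal sketch of such a configuration; the socket endpoint and collection interval are illustrative placeholders rather than values taken from this change, while the `metrics:` keys and the `api_version` floor come straight from the diff:

```yaml
receivers:
  docker_stats:
    endpoint: unix:///var/run/docker.sock  # illustrative; any Docker Engine API endpoint works
    collection_interval: 10s               # illustrative
    api_version: 1.25                      # the new minimum (and default) required by this change
    metrics:
      container.cpu.limit:
        enabled: true
      container.cpu.shares:
        enabled: true
      container.restarts:
        enabled: true
```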
diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_config.go b/receiver/dockerstatsreceiver/internal/metadata/generated_config.go index 1494a935d53a..4eeff7d30275 100644 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_config.go +++ b/receiver/dockerstatsreceiver/internal/metadata/generated_config.go @@ -33,6 +33,8 @@ type MetricsConfig struct { ContainerBlockioIoTimeRecursive MetricConfig `mapstructure:"container.blockio.io_time_recursive"` ContainerBlockioIoWaitTimeRecursive MetricConfig `mapstructure:"container.blockio.io_wait_time_recursive"` ContainerBlockioSectorsRecursive MetricConfig `mapstructure:"container.blockio.sectors_recursive"` + ContainerCPULimit MetricConfig `mapstructure:"container.cpu.limit"` + ContainerCPUShares MetricConfig `mapstructure:"container.cpu.shares"` ContainerCPUThrottlingDataPeriods MetricConfig `mapstructure:"container.cpu.throttling_data.periods"` ContainerCPUThrottlingDataThrottledPeriods MetricConfig `mapstructure:"container.cpu.throttling_data.throttled_periods"` ContainerCPUThrottlingDataThrottledTime MetricConfig `mapstructure:"container.cpu.throttling_data.throttled_time"` @@ -90,6 +92,7 @@ type MetricsConfig struct { ContainerNetworkIoUsageTxPackets MetricConfig `mapstructure:"container.network.io.usage.tx_packets"` ContainerPidsCount MetricConfig `mapstructure:"container.pids.count"` ContainerPidsLimit MetricConfig `mapstructure:"container.pids.limit"` + ContainerRestarts MetricConfig `mapstructure:"container.restarts"` ContainerUptime MetricConfig `mapstructure:"container.uptime"` } @@ -119,6 +122,12 @@ func DefaultMetricsConfig() MetricsConfig { ContainerBlockioSectorsRecursive: MetricConfig{ Enabled: false, }, + ContainerCPULimit: MetricConfig{ + Enabled: false, + }, + ContainerCPUShares: MetricConfig{ + Enabled: false, + }, ContainerCPUThrottlingDataPeriods: MetricConfig{ Enabled: false, }, @@ -290,6 +299,9 @@ func DefaultMetricsConfig() MetricsConfig { ContainerPidsLimit: MetricConfig{ Enabled: false, }, + ContainerRestarts: MetricConfig{ + Enabled: false, + }, ContainerUptime: MetricConfig{ Enabled: false, }, diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go b/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go index 2c94c9a2c75d..c44f0a576287 100644 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/dockerstatsreceiver/internal/metadata/generated_config_test.go @@ -34,6 +34,8 @@ func TestMetricsBuilderConfig(t *testing.T) { ContainerBlockioIoTimeRecursive: MetricConfig{Enabled: true}, ContainerBlockioIoWaitTimeRecursive: MetricConfig{Enabled: true}, ContainerBlockioSectorsRecursive: MetricConfig{Enabled: true}, + ContainerCPULimit: MetricConfig{Enabled: true}, + ContainerCPUShares: MetricConfig{Enabled: true}, ContainerCPUThrottlingDataPeriods: MetricConfig{Enabled: true}, ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{Enabled: true}, ContainerCPUThrottlingDataThrottledTime: MetricConfig{Enabled: true}, @@ -91,6 +93,7 @@ func TestMetricsBuilderConfig(t *testing.T) { ContainerNetworkIoUsageTxPackets: MetricConfig{Enabled: true}, ContainerPidsCount: MetricConfig{Enabled: true}, ContainerPidsLimit: MetricConfig{Enabled: true}, + ContainerRestarts: MetricConfig{Enabled: true}, ContainerUptime: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ @@ -116,6 +119,8 @@ func TestMetricsBuilderConfig(t *testing.T) { ContainerBlockioIoTimeRecursive: MetricConfig{Enabled: false}, 
ContainerBlockioIoWaitTimeRecursive: MetricConfig{Enabled: false}, ContainerBlockioSectorsRecursive: MetricConfig{Enabled: false}, + ContainerCPULimit: MetricConfig{Enabled: false}, + ContainerCPUShares: MetricConfig{Enabled: false}, ContainerCPUThrottlingDataPeriods: MetricConfig{Enabled: false}, ContainerCPUThrottlingDataThrottledPeriods: MetricConfig{Enabled: false}, ContainerCPUThrottlingDataThrottledTime: MetricConfig{Enabled: false}, @@ -173,6 +178,7 @@ func TestMetricsBuilderConfig(t *testing.T) { ContainerNetworkIoUsageTxPackets: MetricConfig{Enabled: false}, ContainerPidsCount: MetricConfig{Enabled: false}, ContainerPidsLimit: MetricConfig{Enabled: false}, + ContainerRestarts: MetricConfig{Enabled: false}, ContainerUptime: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go index 8aaef0e4d807..c2c39c061aee 100644 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go @@ -452,6 +452,104 @@ func newMetricContainerBlockioSectorsRecursive(cfg MetricConfig) metricContainer return m } +type metricContainerCPULimit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.limit metric with initial data. +func (m *metricContainerCPULimit) init() { + m.data.SetName("container.cpu.limit") + m.data.SetDescription("CPU limit set for the container.") + m.data.SetUnit("{cpus}") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPULimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPULimit) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerCPULimit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPULimit(cfg MetricConfig) metricContainerCPULimit { + m := metricContainerCPULimit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricContainerCPUShares struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.shares metric with initial data. 
+func (m *metricContainerCPUShares) init() { + m.data.SetName("container.cpu.shares") + m.data.SetDescription("CPU shares set for the container.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPUShares) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUShares) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerCPUShares) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUShares(cfg MetricConfig) metricContainerCPUShares { + m := metricContainerCPUShares{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricContainerCPUThrottlingDataPeriods struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -3373,6 +3471,57 @@ func newMetricContainerPidsLimit(cfg MetricConfig) metricContainerPidsLimit { return m } +type metricContainerRestarts struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.restarts metric with initial data. +func (m *metricContainerRestarts) init() { + m.data.SetName("container.restarts") + m.data.SetDescription("Number of restarts for the container.") + m.data.SetUnit("{restarts}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricContainerRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerRestarts) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricContainerRestarts) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerRestarts(cfg MetricConfig) metricContainerRestarts { + m := metricContainerRestarts{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricContainerUptime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -3438,6 +3587,8 @@ type MetricsBuilder struct { metricContainerBlockioIoTimeRecursive metricContainerBlockioIoTimeRecursive metricContainerBlockioIoWaitTimeRecursive metricContainerBlockioIoWaitTimeRecursive metricContainerBlockioSectorsRecursive metricContainerBlockioSectorsRecursive + metricContainerCPULimit metricContainerCPULimit + metricContainerCPUShares metricContainerCPUShares metricContainerCPUThrottlingDataPeriods metricContainerCPUThrottlingDataPeriods metricContainerCPUThrottlingDataThrottledPeriods metricContainerCPUThrottlingDataThrottledPeriods metricContainerCPUThrottlingDataThrottledTime metricContainerCPUThrottlingDataThrottledTime @@ -3495,6 +3646,7 @@ type MetricsBuilder struct { metricContainerNetworkIoUsageTxPackets metricContainerNetworkIoUsageTxPackets metricContainerPidsCount metricContainerPidsCount metricContainerPidsLimit metricContainerPidsLimit + metricContainerRestarts metricContainerRestarts metricContainerUptime metricContainerUptime } @@ -3522,6 +3674,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricContainerBlockioIoTimeRecursive: newMetricContainerBlockioIoTimeRecursive(mbc.Metrics.ContainerBlockioIoTimeRecursive), metricContainerBlockioIoWaitTimeRecursive: newMetricContainerBlockioIoWaitTimeRecursive(mbc.Metrics.ContainerBlockioIoWaitTimeRecursive), metricContainerBlockioSectorsRecursive: newMetricContainerBlockioSectorsRecursive(mbc.Metrics.ContainerBlockioSectorsRecursive), + metricContainerCPULimit: newMetricContainerCPULimit(mbc.Metrics.ContainerCPULimit), + metricContainerCPUShares: newMetricContainerCPUShares(mbc.Metrics.ContainerCPUShares), metricContainerCPUThrottlingDataPeriods: newMetricContainerCPUThrottlingDataPeriods(mbc.Metrics.ContainerCPUThrottlingDataPeriods), metricContainerCPUThrottlingDataThrottledPeriods: newMetricContainerCPUThrottlingDataThrottledPeriods(mbc.Metrics.ContainerCPUThrottlingDataThrottledPeriods), metricContainerCPUThrottlingDataThrottledTime: newMetricContainerCPUThrottlingDataThrottledTime(mbc.Metrics.ContainerCPUThrottlingDataThrottledTime), @@ -3579,6 +3733,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricContainerNetworkIoUsageTxPackets: newMetricContainerNetworkIoUsageTxPackets(mbc.Metrics.ContainerNetworkIoUsageTxPackets), metricContainerPidsCount: newMetricContainerPidsCount(mbc.Metrics.ContainerPidsCount), metricContainerPidsLimit: newMetricContainerPidsLimit(mbc.Metrics.ContainerPidsLimit), + metricContainerRestarts: newMetricContainerRestarts(mbc.Metrics.ContainerRestarts), metricContainerUptime: newMetricContainerUptime(mbc.Metrics.ContainerUptime), } for _, op := range options { @@ -3650,6 +3805,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricContainerBlockioIoTimeRecursive.emit(ils.Metrics()) mb.metricContainerBlockioIoWaitTimeRecursive.emit(ils.Metrics()) mb.metricContainerBlockioSectorsRecursive.emit(ils.Metrics()) + mb.metricContainerCPULimit.emit(ils.Metrics()) + mb.metricContainerCPUShares.emit(ils.Metrics()) mb.metricContainerCPUThrottlingDataPeriods.emit(ils.Metrics()) mb.metricContainerCPUThrottlingDataThrottledPeriods.emit(ils.Metrics()) mb.metricContainerCPUThrottlingDataThrottledTime.emit(ils.Metrics()) @@ -3707,6 +3864,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricContainerNetworkIoUsageTxPackets.emit(ils.Metrics()) mb.metricContainerPidsCount.emit(ils.Metrics()) mb.metricContainerPidsLimit.emit(ils.Metrics()) + 
mb.metricContainerRestarts.emit(ils.Metrics()) mb.metricContainerUptime.emit(ils.Metrics()) for _, op := range rmo { @@ -3768,6 +3926,16 @@ func (mb *MetricsBuilder) RecordContainerBlockioSectorsRecursiveDataPoint(ts pco mb.metricContainerBlockioSectorsRecursive.recordDataPoint(mb.startTime, ts, val, deviceMajorAttributeValue, deviceMinorAttributeValue, operationAttributeValue) } +// RecordContainerCPULimitDataPoint adds a data point to container.cpu.limit metric. +func (mb *MetricsBuilder) RecordContainerCPULimitDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricContainerCPULimit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordContainerCPUSharesDataPoint adds a data point to container.cpu.shares metric. +func (mb *MetricsBuilder) RecordContainerCPUSharesDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerCPUShares.recordDataPoint(mb.startTime, ts, val) +} + // RecordContainerCPUThrottlingDataPeriodsDataPoint adds a data point to container.cpu.throttling_data.periods metric. func (mb *MetricsBuilder) RecordContainerCPUThrottlingDataPeriodsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricContainerCPUThrottlingDataPeriods.recordDataPoint(mb.startTime, ts, val) @@ -4053,6 +4221,11 @@ func (mb *MetricsBuilder) RecordContainerPidsLimitDataPoint(ts pcommon.Timestamp mb.metricContainerPidsLimit.recordDataPoint(mb.startTime, ts, val) } +// RecordContainerRestartsDataPoint adds a data point to container.restarts metric. +func (mb *MetricsBuilder) RecordContainerRestartsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricContainerRestarts.recordDataPoint(mb.startTime, ts, val) +} + // RecordContainerUptimeDataPoint adds a data point to container.uptime metric. func (mb *MetricsBuilder) RecordContainerUptimeDataPoint(ts pcommon.Timestamp, val float64) { mb.metricContainerUptime.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go index 6fe3e0d65ced..e52c3f062d2f 100644 --- a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics_test.go @@ -80,6 +80,12 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordContainerBlockioSectorsRecursiveDataPoint(ts, 1, "device_major-val", "device_minor-val", "operation-val") + allMetricsCount++ + mb.RecordContainerCPULimitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordContainerCPUSharesDataPoint(ts, 1) + allMetricsCount++ mb.RecordContainerCPUThrottlingDataPeriodsDataPoint(ts, 1) @@ -264,6 +270,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordContainerPidsLimitDataPoint(ts, 1) + allMetricsCount++ + mb.RecordContainerRestartsDataPoint(ts, 1) + allMetricsCount++ mb.RecordContainerUptimeDataPoint(ts, 1) @@ -481,6 +490,30 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok = dp.Attributes().Get("operation") assert.True(t, ok) assert.EqualValues(t, "operation-val", attrVal.Str()) + case "container.cpu.limit": + assert.False(t, validatedMetrics["container.cpu.limit"], "Found a duplicate in the metrics slice: container.cpu.limit") + validatedMetrics["container.cpu.limit"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "CPU limit set for the container.", ms.At(i).Description()) + assert.Equal(t, "{cpus}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + 
assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.cpu.shares": + assert.False(t, validatedMetrics["container.cpu.shares"], "Found a duplicate in the metrics slice: container.cpu.shares") + validatedMetrics["container.cpu.shares"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "CPU shares set for the container.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "container.cpu.throttling_data.periods": assert.False(t, validatedMetrics["container.cpu.throttling_data.periods"], "Found a duplicate in the metrics slice: container.cpu.throttling_data.periods") validatedMetrics["container.cpu.throttling_data.periods"] = true @@ -1302,6 +1335,20 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "container.restarts": + assert.False(t, validatedMetrics["container.restarts"], "Found a duplicate in the metrics slice: container.restarts") + validatedMetrics["container.restarts"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of restarts for the container.", ms.At(i).Description()) + assert.Equal(t, "{restarts}", ms.At(i).Unit()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "container.uptime": assert.False(t, validatedMetrics["container.uptime"], "Found a duplicate in the metrics slice: container.uptime") validatedMetrics["container.uptime"] = true diff --git a/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml index 8f9f8a706831..91acbeaa86aa 100644 --- a/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/dockerstatsreceiver/internal/metadata/testdata/config.yaml @@ -17,6 +17,10 @@ all_set: enabled: true container.blockio.sectors_recursive: enabled: true + container.cpu.limit: + enabled: true + container.cpu.shares: + enabled: true container.cpu.throttling_data.periods: enabled: true container.cpu.throttling_data.throttled_periods: @@ -131,6 +135,8 @@ all_set: enabled: true container.pids.limit: enabled: true + container.restarts: + enabled: true container.uptime: enabled: true resource_attributes: @@ -166,6 +172,10 @@ none_set: enabled: false container.blockio.sectors_recursive: enabled: false + container.cpu.limit: + enabled: false + container.cpu.shares: + enabled: false container.cpu.throttling_data.periods: enabled: false container.cpu.throttling_data.throttled_periods: @@ -280,6 +290,8 @@ none_set: enabled: false container.pids.limit: enabled: false + container.restarts: 
+ enabled: false container.uptime: enabled: false resource_attributes: diff --git a/receiver/dockerstatsreceiver/metadata.yaml b/receiver/dockerstatsreceiver/metadata.yaml index df6e075f0458..b14799670197 100644 --- a/receiver/dockerstatsreceiver/metadata.yaml +++ b/receiver/dockerstatsreceiver/metadata.yaml @@ -137,6 +137,21 @@ metrics: unit: "1" gauge: value_type: double + container.cpu.limit: + enabled: false + description: "CPU limit set for the container." + extended_documentation: "This metric is only reported if the container has limits set with `--cpus`, `--cpuset-cpus` or `--cpu-quota`." + unit: "{cpus}" + gauge: + value_type: double + container.cpu.shares: + enabled: false + description: "CPU shares set for the container." + unit: "1" + gauge: + value_type: int + + # Memory container.memory.usage.limit: enabled: true @@ -669,3 +684,13 @@ metrics: unit: s gauge: value_type: double + + # Container + container.restarts: + enabled: false + description: "Number of restarts for the container." + unit: "{restarts}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative diff --git a/receiver/dockerstatsreceiver/metric_helper.go b/receiver/dockerstatsreceiver/metric_helper.go index 530a8ef5766c..d4070f196959 100644 --- a/receiver/dockerstatsreceiver/metric_helper.go +++ b/receiver/dockerstatsreceiver/metric_helper.go @@ -4,9 +4,16 @@ package dockerstatsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver" import ( + "fmt" + "strconv" + "strings" + dtypes "github.com/docker/docker/api/types" + ctypes "github.com/docker/docker/api/types/container" ) +const nanosInASecond = 1e9 + // Following functions has been copied from: calculateCPUPercentUnix(), calculateMemUsageUnixNoCache(), calculateMemPercentUnixNoCache() // https://github.com/docker/cli/blob/a2e9ed3b874fccc177b9349f3b0277612403934f/cli/command/container/stats_helpers.go @@ -71,3 +78,58 @@ func calculateMemoryPercent(limit uint64, usedNoCache uint64) float64 { } return 0.0 } + +// calculateCPULimit calculates the number of cpus assigned to a container. + +// Calculation is based on three alternatives, in the following order: +// - nanocpus: if set, e.g. by docker run --cpus=2 +// - cpusetCpus: if set, e.g. by docker run --cpuset-cpus="0,2" +// - cpuquota: if set, e.g. by docker run --cpu-quota=50000 +// +// See https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler for background. +func calculateCPULimit(hostConfig *ctypes.HostConfig) (float64, error) { + var cpuLimit float64 + var err error + + switch { + case hostConfig.NanoCPUs > 0: + cpuLimit = float64(hostConfig.NanoCPUs) / nanosInASecond + case hostConfig.CpusetCpus != "": + cpuLimit, err = parseCPUSet(hostConfig.CpusetCpus) + if err != nil { + return cpuLimit, err + } + case hostConfig.CPUQuota > 0: + period := hostConfig.CPUPeriod + if period == 0 { + period = 100000 // Default CFS Period + } + cpuLimit = float64(hostConfig.CPUQuota) / float64(period) + } + return cpuLimit, nil +} + +// parseCPUSet is a helper function that decomposes a --cpuset-cpus value into the number of cpus.
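+// For example, "0,2" yields 2 cpus, while "0-2,4" yields 4 cpus (values taken from the test cases below).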
+func parseCPUSet(line string) (float64, error) { + var numCPUs uint64 + + lineSlice := strings.Split(line, ",") + for _, l := range lineSlice { + lineParts := strings.Split(l, "-") + if len(lineParts) == 2 { + p0, err0 := strconv.Atoi(lineParts[0]) + if err0 != nil { + return 0, fmt.Errorf("invalid --cpuset-cpus value: %w", err0) + } + p1, err1 := strconv.Atoi(lineParts[1]) + if err1 != nil { + return 0, fmt.Errorf("invalid --cpuset-cpus value: %w", err1) + } + numCPUs += uint64(p1 - p0 + 1) + } else if len(lineParts) == 1 { + numCPUs++ + } + } + return float64(numCPUs), nil +} diff --git a/receiver/dockerstatsreceiver/metric_helper_test.go b/receiver/dockerstatsreceiver/metric_helper_test.go new file mode 100644 index 000000000000..04402519c714 --- /dev/null +++ b/receiver/dockerstatsreceiver/metric_helper_test.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package dockerstatsreceiver + +import ( + "errors" + "testing" + + ctypes "github.com/docker/docker/api/types/container" + "github.com/stretchr/testify/assert" +) + +func Test_calculateCPULimit(t *testing.T) { + tests := []struct { + name string + args *ctypes.HostConfig + want float64 + err error + }{ + { + "Test NanoCPUs", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + NanoCPUs: 2500000000, + }, + }, + 2.5, + nil, + }, + { + "Test CpusetCpus", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + CpusetCpus: "0-2", + }, + }, + 3, + nil, + }, + { + "Test CPUQuota", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + CPUQuota: 50000, + }, + }, + 0.5, + nil, + }, + { + "Test CPUQuota Custom Period", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + CPUQuota: 300000, + CPUPeriod: 200000, + }, + }, + 1.5, + nil, + }, + { + "Test NanoCPUs Precedence", + &ctypes.HostConfig{ + Resources: ctypes.Resources{ + NanoCPUs: 1800000000, + CpusetCpus: "0-1", + CPUQuota: 400000, + }, + }, + 1.8, + nil, + }, + { + "Test No Values", + &ctypes.HostConfig{ + Resources: ctypes.Resources{}, + }, + 0, + nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + want, err := calculateCPULimit(tt.args) + assert.Equalf(t, tt.want, want, "calculateCPULimit(%v)", tt.args) + assert.Equalf(t, tt.err, err, "calculateCPULimit(%v)", tt.args) + }) + } +} + +func Test_parseCPUSet(t *testing.T) { + tests := []struct { + input string + expected float64 + err error + }{ + {"0,2", 2, nil}, + {"0-2", 3, nil}, + {"0-2,4", 4, nil}, + {"0-2,4-5", 5, nil}, + {"a-b", 0, errors.New("invalid --cpuset-cpus value: strconv.Atoi: parsing \"a\": invalid syntax")}, + {"", 1, nil}, // degenerates to one entry; calculateCPULimit only calls parseCPUSet for non-empty values + } + + for _, test := range tests { + result, err := parseCPUSet(test.input) + + if err != nil && test.err != nil { + if err.Error() != test.err.Error() { + t.Errorf("parseCPUSet(%s) returned error %v, expected %v", test.input, err, test.err) + } + } else if !errors.Is(err, test.err) { + t.Errorf("parseCPUSet(%s) returned error %v, expected %v", test.input, err, test.err) + } + + if result != test.expected { + t.Errorf("parseCPUSet(%s) returned %f, expected %f", test.input, result, test.expected) + } + } +} diff --git a/receiver/dockerstatsreceiver/receiver.go b/receiver/dockerstatsreceiver/receiver.go index e848a4914420..89d9ff4e2d1d 100644 --- a/receiver/dockerstatsreceiver/receiver.go +++ b/receiver/dockerstatsreceiver/receiver.go @@ -25,8 +25,8 @@ import ( ) const ( - defaultDockerAPIVersion = 1.23 - minimalRequiredDockerAPIVersion = 1.22 + defaultDockerAPIVersion = 1.25 + minimalRequiredDockerAPIVersion = 1.25 ) type
resultV2 struct { @@ -121,6 +121,10 @@ func (r *metricsReceiver) recordContainerStats(now pcommon.Timestamp, containerS if err := r.recordBaseMetrics(now, container.ContainerJSONBase); err != nil { errs = multierr.Append(errs, err) } + if err := r.recordHostConfigMetrics(now, container.ContainerJSON); err != nil { + errs = multierr.Append(errs, err) + } + r.mb.RecordContainerRestartsDataPoint(now, int64(container.RestartCount)) // Always-present resource attrs + the user-configured resource attrs rb := r.mb.NewResourceBuilder() @@ -279,3 +283,16 @@ func (r *metricsReceiver) recordBaseMetrics(now pcommon.Timestamp, base *types.C } return nil } + +func (r *metricsReceiver) recordHostConfigMetrics(now pcommon.Timestamp, containerJSON *dtypes.ContainerJSON) error { + r.mb.RecordContainerCPUSharesDataPoint(now, containerJSON.HostConfig.CPUShares) + + cpuLimit, err := calculateCPULimit(containerJSON.HostConfig) + if err != nil { + return scrapererror.NewPartialScrapeError(fmt.Errorf("error retrieving container.cpu.limit: %w", err), 1) + } + if cpuLimit > 0 { + r.mb.RecordContainerCPULimitDataPoint(now, cpuLimit) + } + return nil +} diff --git a/receiver/dockerstatsreceiver/receiver_test.go b/receiver/dockerstatsreceiver/receiver_test.go index a13b16c06dcf..a25faf686faf 100644 --- a/receiver/dockerstatsreceiver/receiver_test.go +++ b/receiver/dockerstatsreceiver/receiver_test.go @@ -43,6 +43,8 @@ var ( ContainerBlockioIoTimeRecursive: metricEnabled, ContainerBlockioIoWaitTimeRecursive: metricEnabled, ContainerBlockioSectorsRecursive: metricEnabled, + ContainerCPULimit: metricEnabled, + ContainerCPUShares: metricEnabled, ContainerCPUUtilization: metricEnabled, ContainerCPUThrottlingDataPeriods: metricEnabled, ContainerCPUThrottlingDataThrottledPeriods: metricEnabled, @@ -99,6 +101,7 @@ var ( ContainerPidsCount: metricEnabled, ContainerPidsLimit: metricEnabled, ContainerUptime: metricEnabled, + ContainerRestarts: metricEnabled, ContainerMemoryAnon: metricEnabled, ContainerMemoryFile: metricEnabled, } @@ -165,9 +168,9 @@ func TestScrapeV2(t *testing.T) { t.Helper() containerID := "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326" mockServer, err := dockerMockServer(&map[string]string{ - "/v1.23/containers/json": filepath.Join(mockFolder, "single_container", "containers.json"), - "/v1.23/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container", "container.json"), - "/v1.23/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container", "stats.json"), + "/v1.25/containers/json": filepath.Join(mockFolder, "single_container", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container", "stats.json"), }) require.NoError(t, err) return mockServer @@ -186,11 +189,11 @@ func TestScrapeV2(t *testing.T) { "a359c0fc87c546b42d2ad32db7c978627f1d89b49cb3827a7b19ba97a1febcce", } mockServer, err := dockerMockServer(&map[string]string{ - "/v1.23/containers/json": filepath.Join(mockFolder, "two_containers", "containers.json"), - "/v1.23/containers/" + containerIDs[0] + "/json": filepath.Join(mockFolder, "two_containers", "container1.json"), - "/v1.23/containers/" + containerIDs[1] + "/json": filepath.Join(mockFolder, "two_containers", "container2.json"), - "/v1.23/containers/" + containerIDs[0] + "/stats": filepath.Join(mockFolder, "two_containers", "stats1.json"), - "/v1.23/containers/" + 
containerIDs[1] + "/stats": filepath.Join(mockFolder, "two_containers", "stats2.json"), + "/v1.25/containers/json": filepath.Join(mockFolder, "two_containers", "containers.json"), + "/v1.25/containers/" + containerIDs[0] + "/json": filepath.Join(mockFolder, "two_containers", "container1.json"), + "/v1.25/containers/" + containerIDs[1] + "/json": filepath.Join(mockFolder, "two_containers", "container2.json"), + "/v1.25/containers/" + containerIDs[0] + "/stats": filepath.Join(mockFolder, "two_containers", "stats1.json"), + "/v1.25/containers/" + containerIDs[1] + "/stats": filepath.Join(mockFolder, "two_containers", "stats2.json"), }) require.NoError(t, err) return mockServer @@ -206,9 +209,9 @@ func TestScrapeV2(t *testing.T) { t.Helper() containerID := "10b703fb312b25e8368ab5a3bce3a1610d1cee5d71a94920f1a7adbc5b0cb326" mockServer, err := dockerMockServer(&map[string]string{ - "/v1.23/containers/json": filepath.Join(mockFolder, "no_pids_stats", "containers.json"), - "/v1.23/containers/" + containerID + "/json": filepath.Join(mockFolder, "no_pids_stats", "container.json"), - "/v1.23/containers/" + containerID + "/stats": filepath.Join(mockFolder, "no_pids_stats", "stats.json"), + "/v1.25/containers/json": filepath.Join(mockFolder, "no_pids_stats", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "no_pids_stats", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "no_pids_stats", "stats.json"), }) require.NoError(t, err) return mockServer @@ -224,9 +227,27 @@ func TestScrapeV2(t *testing.T) { t.Helper() containerID := "78de07328afff50a9777b07dd36a28c709dffe081baaf67235db618843399643" mockServer, err := dockerMockServer(&map[string]string{ - "/v1.23/containers/json": filepath.Join(mockFolder, "pids_stats_max", "containers.json"), - "/v1.23/containers/" + containerID + "/json": filepath.Join(mockFolder, "pids_stats_max", "container.json"), - "/v1.23/containers/" + containerID + "/stats": filepath.Join(mockFolder, "pids_stats_max", "stats.json"), + "/v1.25/containers/json": filepath.Join(mockFolder, "pids_stats_max", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "pids_stats_max", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "pids_stats_max", "stats.json"), + }) + require.NoError(t, err) + return mockServer + }, + cfgBuilder: newTestConfigBuilder(). + withDefaultLabels(). 
+ withMetrics(allMetricsEnabled), + }, + { + desc: "scrapeV2_cpu_limit", + expectedMetricsFile: filepath.Join(mockFolder, "cpu_limit", "expected_metrics.yaml"), + mockDockerEngine: func(t *testing.T) *httptest.Server { + t.Helper() + containerID := "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674" + mockServer, err := dockerMockServer(&map[string]string{ + "/v1.25/containers/json": filepath.Join(mockFolder, "cpu_limit", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "cpu_limit", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cpu_limit", "stats.json"), }) require.NoError(t, err) return mockServer @@ -241,9 +262,9 @@ func TestScrapeV2(t *testing.T) { mockDockerEngine: func(t *testing.T) *httptest.Server { containerID := "f97ed5bca0a5a0b85bfd52c4144b96174e825c92a138bc0458f0e196f2c7c1b4" mockServer, err := dockerMockServer(&map[string]string{ - "/v1.23/containers/json": filepath.Join(mockFolder, "cgroups_v2", "containers.json"), - "/v1.23/containers/" + containerID + "/json": filepath.Join(mockFolder, "cgroups_v2", "container.json"), - "/v1.23/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cgroups_v2", "stats.json"), + "/v1.25/containers/json": filepath.Join(mockFolder, "cgroups_v2", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "cgroups_v2", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "cgroups_v2", "stats.json"), }) require.NoError(t, err) return mockServer @@ -258,9 +279,9 @@ func TestScrapeV2(t *testing.T) { mockDockerEngine: func(t *testing.T) *httptest.Server { containerID := "73364842ef014441cac89fed05df19463b1230db25a31252cdf82e754f1ec581" mockServer, err := dockerMockServer(&map[string]string{ - "/v1.23/containers/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "containers.json"), - "/v1.23/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "container.json"), - "/v1.23/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "stats.json"), + "/v1.25/containers/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "containers.json"), + "/v1.25/containers/" + containerID + "/json": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "container.json"), + "/v1.25/containers/" + containerID + "/stats": filepath.Join(mockFolder, "single_container_with_optional_resource_attributes", "stats.json"), }) require.NoError(t, err) return mockServer diff --git a/receiver/dockerstatsreceiver/testdata/config.yaml b/receiver/dockerstatsreceiver/testdata/config.yaml index 821033ddcb84..57b2858bb806 100644 --- a/receiver/dockerstatsreceiver/testdata/config.yaml +++ b/receiver/dockerstatsreceiver/testdata/config.yaml @@ -3,7 +3,7 @@ docker_stats/allsettings: endpoint: http://example.com/ collection_interval: 2s timeout: 20s - api_version: 1.24 + api_version: 1.25 container_labels_to_metric_labels: my.container.label: my-metric-label my.other.container.label: my-other-metric-label diff --git a/receiver/dockerstatsreceiver/testdata/mock/cgroups_v2/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/cgroups_v2/expected_metrics.yaml index 1192f4e4f89c..756751d84ee6 100644 --- 
a/receiver/dockerstatsreceiver/testdata/mock/cgroups_v2/expected_metrics.yaml +++ b/receiver/dockerstatsreceiver/testdata/mock/cgroups_v2/expected_metrics.yaml @@ -52,6 +52,14 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: By + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. name: container.cpu.throttling_data.periods sum: @@ -122,12 +130,12 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: ns - - description: 'Percent of CPU used by the container.' + - description: Percent of CPU used by the container. gauge: dataPoints: - asDouble: 0.041326615629205886 - startTimeUnixNano: "1687762436307743000" - timeUnixNano: "1687762436315926000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: container.cpu.utilization unit: "1" - description: The amount of anonymous memory that has been identified as active by the kernel. @@ -378,10 +386,20 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' - description: Time elapsed since container start time. gauge: dataPoints: - - asDouble: 5.343272557349942e+06 + - asDouble: 1.5026932099271942e+07 startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: container.uptime diff --git a/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/container.json b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/container.json new file mode 100644 index 000000000000..f7c0905aebf5 --- /dev/null +++ b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/container.json @@ -0,0 +1,196 @@ +{ + "Id": "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674", + "Created": "2023-04-17T13:51:04.607496655Z", + "Path": "sleep", + "Args": [ + "infinity" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 135886, + "ExitCode": 0, + "Error": "", + "StartedAt": "2023-04-19T14:07:07.809461484Z", + "FinishedAt": "2023-04-19T14:06:53.167608711Z" + }, + "Image": "sha256:3fbaf71a998bae6e375be74b999bd418091bf6511e356a129fdc969c4a94a5bc", + "ResolvConfPath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/hostname", + "HostsPath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/hosts", + "LogPath": "/var/lib/docker/containers/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674/9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674-json.log", + "Name": "/sleepy1", + "RestartCount": 0, + "Driver": "devicemapper", + "Platform": "linux", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "docker-default", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "NetworkMode": "default", + "PortBindings": {}, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "AutoRemove": false, + "VolumeDriver": "", + "VolumesFrom": null, + 
"CapAdd": null, + "CapDrop": null, + "CgroupnsMode": "private", + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "private", + "Cgroup": "", + "Links": null, + "OomScoreAdj": 0, + "PidMode": "", + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "SecurityOpt": null, + "UTSMode": "", + "UsernsMode": "", + "ShmSize": 67108864, + "Runtime": "runc", + "ConsoleSize": [ + 0, + 0 + ], + "Isolation": "", + "CpuShares": 0, + "Memory": 0, + "NanoCpus": 0, + "CgroupParent": "", + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "BlkioDeviceReadBps": null, + "BlkioDeviceWriteBps": null, + "BlkioDeviceReadIOps": null, + "BlkioDeviceWriteIOps": null, + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpusetCpus": "0,3", + "CpusetMems": "", + "Devices": [], + "DeviceCgroupRules": null, + "DeviceRequests": null, + "KernelMemory": 0, + "KernelMemoryTCP": 0, + "MemoryReservation": 0, + "MemorySwap": 0, + "MemorySwappiness": null, + "OomKillDisable": null, + "PidsLimit": null, + "Ulimits": null, + "CpuCount": 0, + "CpuPercent": 0, + "IOMaximumIOps": 0, + "IOMaximumBandwidth": 0, + "MaskedPaths": [ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "ReadonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + }, + "GraphDriver": { + "Data": { + "DeviceId": "4", + "DeviceName": "docker-253:0-1050151-b0997978b757cf1dc712ad50496bf49e85cfd24d8b1c61853c16a0eec0ed4176", + "DeviceSize": "10737418240" + }, + "Name": "devicemapper" + }, + "Mounts": [], + "Config": { + "Hostname": "78de07328aff", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "sleep", + "infinity" + ], + "Image": "busybox", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": {} + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "384e9c0ba138cdcf78d8abdbb0c55b725ff83d0d02ba3c7aa170b9c38ba5e1fc", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/384e9c0ba138", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "MacAddress": "02:42:ac:11:00:02", + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "8dd6b2854086c51888ebfaca18940146b4ccfc332a9bc3fbe7af7b4d9645bbce", + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } + } + } +} diff --git a/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/containers.json b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/containers.json new file mode 100644 index 000000000000..a693f919c4df --- /dev/null +++ 
b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/containers.json @@ -0,0 +1,39 @@ +[ + { + "Id": "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674", + "Names": [ + "/sleepy1" + ], + "Image": "busybox", + "ImageID": "sha256:3fbaf71a998bae6e375be74b999bd418091bf6511e356a129fdc969c4a94a5bc", + "Command": "sleep infinity", + "Created": 1681739464, + "Ports": [], + "Labels": {}, + "State": "running", + "Status": "Up 4 days", + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "8dd6b2854086c51888ebfaca18940146b4ccfc332a9bc3fbe7af7b4d9645bbce", + "EndpointID": "dccc9fc92b4d33e9a0b0f66c1daaf528e4241259d5f7609b93740c87765c7649", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] + } +] diff --git a/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/expected_metrics.yaml new file mode 100644 index 000000000000..08d6c88004dd --- /dev/null +++ b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/expected_metrics.yaml @@ -0,0 +1,469 @@ +resourceMetrics: + - resource: + attributes: + - key: container.hostname + value: + stringValue: 78de07328aff + - key: container.id + value: + stringValue: 9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674 + - key: container.image.name + value: + stringValue: busybox + - key: container.name + value: + stringValue: sleepy1 + - key: container.runtime + value: + stringValue: docker + schemaUrl: https://opentelemetry.io/schemas/1.6.1 + scopeMetrics: + - metrics: + - description: Number of bytes transferred to/from the disk by the group and descendant groups. + name: container.blockio.io_service_bytes_recursive + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "1" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "1" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "253" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1998848" + attributes: + - key: device_major + value: + stringValue: "7" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: device_major + value: + stringValue: "7" + - key: device_minor + value: + stringValue: "2" + - key: operation + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: CPU limit set for the container. 
+ gauge: + dataPoints: + - asDouble: 2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.limit + unit: '{cpus}' + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" + - description: Number of periods with throttling active. + name: container.cpu.throttling_data.periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Number of periods when the container hits its throttling limit. + name: container.cpu.throttling_data.throttled_periods + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{periods}' + - description: Aggregate time the container was throttled. + name: container.cpu.throttling_data.throttled_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in kernel mode (Linux). Time spent by all container processes in kernel mode (Windows). + name: container.cpu.usage.kernelmode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5467000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: System CPU usage, as reported by docker. + name: container.cpu.usage.system + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "183556380000000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Total CPU time consumed. + name: container.cpu.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10935000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Time spent by tasks of the cgroup in user mode (Linux). Time spent by all container processes in user mode (Windows). + name: container.cpu.usage.usermode + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5467000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ns + - description: Percent of CPU used by the container. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.utilization + unit: "1" + - description: The amount of anonymous memory that has been identified as active by the kernel. + name: container.memory.active_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4096" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as active by the kernel. + name: container.memory.active_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS) (Only available with cgroups v2). + name: container.memory.anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "61440" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Amount of memory used to cache filesystem data, including tmpfs and shared memory (Only available with cgroups v2). 
+ name: container.memory.file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "233848832" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of anonymous memory that has been identified as inactive by the kernel. + name: container.memory.inactive_anon + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "110592" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Cache memory that has been identified as inactive by the kernel. + name: container.memory.inactive_file + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1892352" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Percentage of memory used. + gauge: + dataPoints: + - asDouble: 0.016875995187363255 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.memory.percent + unit: "1" + - description: Indicate the number of times that a process of the cgroup triggered a page fault. + name: container.memory.pgfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1029" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: Indicate the number of times that a process of the cgroup triggered a major fault. + name: container.memory.pgmajfault + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' + - description: The amount of memory that cannot be reclaimed. + name: container.memory.unevictable + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory limit of the container. + name: container.memory.usage.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2063048704" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Maximum memory usage. + name: container.memory.usage.max + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Memory usage of the container. This excludes the cache. + name: container.memory.usage.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "348160" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Bytes received by the container. + name: container.network.io.usage.rx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3608" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Incoming packets dropped. + name: container.network.io.usage.rx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Received errors. + name: container.network.io.usage.rx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets received. 
+ name: container.network.io.usage.rx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "44" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Bytes sent. + name: container.network.io.usage.tx_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Outgoing packets dropped. + name: container.network.io.usage.tx_dropped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Sent errors. + name: container.network.io.usage.tx_errors + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{errors}' + - description: Packets sent. + name: container.network.io.usage.tx_packets + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: interface + value: + stringValue: eth0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{packets}' + - description: Number of pids in the container's cgroup. + name: container.pids.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Maximum number of pids in the container's cgroup. + name: container.pids.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2192" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' + - description: Time elapsed since container start time. 
+ gauge: + dataPoints: + - asDouble: 1.5532824571167516e+07 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.uptime + unit: s + scope: + name: otelcol/dockerstatsreceiver + version: latest diff --git a/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/stats.json b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/stats.json new file mode 100644 index 000000000000..68509e8f1058 --- /dev/null +++ b/receiver/dockerstatsreceiver/testdata/mock/cpu_limit/stats.json @@ -0,0 +1,136 @@ +{ + "read": "2023-04-24T12:23:08.456710245Z", + "preread": "2023-04-24T12:23:07.447356277Z", + "pids_stats": { + "current": 1, + "limit": 2192 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 7, + "minor": 2, + "op": "read", + "value": 1998848 + }, + { + "major": 7, + "minor": 2, + "op": "write", + "value": 0 + }, + { + "major": 253, + "minor": 1, + "op": "read", + "value": 1998848 + }, + { + "major": 253, + "minor": 1, + "op": "write", + "value": 0 + }, + { + "major": 253, + "minor": 2, + "op": "read", + "value": 1998848 + }, + { + "major": 253, + "minor": 2, + "op": "write", + "value": 0 + } + ], + "io_serviced_recursive": null, + "io_queue_recursive": null, + "io_service_time_recursive": null, + "io_wait_time_recursive": null, + "io_merged_recursive": null, + "io_time_recursive": null, + "sectors_recursive": null + }, + "num_procs": 0, + "storage_stats": {}, + "cpu_stats": { + "cpu_usage": { + "total_usage": 10935000, + "usage_in_kernelmode": 5467000, + "usage_in_usermode": 5467000 + }, + "system_cpu_usage": 183556380000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 10935000, + "usage_in_kernelmode": 5467000, + "usage_in_usermode": 5467000 + }, + "system_cpu_usage": 183554360000000, + "online_cpus": 2, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "memory_stats": { + "usage": 2240512, + "stats": { + "active_anon": 4096, + "active_file": 0, + "anon": 61440, + "anon_thp": 0, + "file": 233848832, + "file_dirty": 0, + "file_mapped": 1138688, + "file_writeback": 0, + "inactive_anon": 110592, + "inactive_file": 1892352, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 1029, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 12, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 165776, + "slab_reclaimable": 93752, + "slab_unreclaimable": 72024, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 0, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + }, + "limit": 2063048704 + }, + "name": "/sleepy1", + "id": "9b842c47c1c3e4ee931e2c9713cf4e77aa09acc2201aea60fba04b6dbba6c674", + "networks": { + "eth0": { + "rx_bytes": 3608, + "rx_packets": 44, + "rx_errors": 0, + "rx_dropped": 0, + "tx_bytes": 0, + "tx_packets": 0, + "tx_errors": 0, + "tx_dropped": 0 + } + } +} diff --git a/receiver/dockerstatsreceiver/testdata/mock/no_pids_stats/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/no_pids_stats/expected_metrics.yaml index 801ada31b154..838e67b56f3b 100644 --- a/receiver/dockerstatsreceiver/testdata/mock/no_pids_stats/expected_metrics.yaml +++ b/receiver/dockerstatsreceiver/testdata/mock/no_pids_stats/expected_metrics.yaml @@ -195,6 +195,14 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' + - description: CPU shares set for 
the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. name: container.cpu.throttling_data.periods sum: @@ -328,12 +336,12 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: ns - - description: 'Percent of CPU used by the container.' + - description: Percent of CPU used by the container. gauge: dataPoints: - asDouble: 0.0002888012543185477 - startTimeUnixNano: "1687762436220246000" - timeUnixNano: "1687762436230155000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: container.cpu.utilization unit: "1" - description: The amount of anonymous memory that has been identified as active by the kernel. @@ -779,10 +787,20 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{packets}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' - description: Time elapsed since container start time. gauge: dataPoints: - - asDouble: 3.06813859730868e+07 + - asDouble: 4.03649084666988e+07 startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: container.uptime diff --git a/receiver/dockerstatsreceiver/testdata/mock/pids_stats_max/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/pids_stats_max/expected_metrics.yaml index 9c6d8fbfa31a..59f5e428e8d1 100644 --- a/receiver/dockerstatsreceiver/testdata/mock/pids_stats_max/expected_metrics.yaml +++ b/receiver/dockerstatsreceiver/testdata/mock/pids_stats_max/expected_metrics.yaml @@ -104,6 +104,14 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: By + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. name: container.cpu.throttling_data.periods sum: @@ -174,12 +182,12 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: ns - - description: 'Percent of CPU used by the container.' + - description: Percent of CPU used by the container. gauge: dataPoints: - asDouble: 0 - startTimeUnixNano: "1687762436274253000" - timeUnixNano: "1687762436282542000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" name: container.cpu.utilization unit: "1" - description: The amount of anonymous memory that has been identified as active by the kernel. @@ -430,10 +438,20 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{restarts}' - description: Time elapsed since container start time. 
gauge: dataPoints: - - asDouble: 5.849208473080516e+06 + - asDouble: 1.5532782694550516e+07 startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: container.uptime diff --git a/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.yaml index 02c317151ac0..bcbb45b86230 100644 --- a/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.yaml +++ b/receiver/dockerstatsreceiver/testdata/mock/single_container/expected_metrics.yaml @@ -201,6 +201,13 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + timeUnixNano: "1657771705535206000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. name: container.cpu.throttling_data.periods sum: @@ -794,6 +801,15 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + timeUnixNano: "1657771705535206000" + isMonotonic: true + unit: "{restarts}" - description: Time elapsed since container start time. gauge: dataPoints: diff --git a/receiver/dockerstatsreceiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml index 52e89413f0ed..4784753987e7 100644 --- a/receiver/dockerstatsreceiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml +++ b/receiver/dockerstatsreceiver/testdata/mock/single_container_with_optional_resource_attributes/expected_metrics.yaml @@ -201,6 +201,14 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + startTimeUnixNano: "1691752005571543000" + timeUnixNano: "1691752005573769000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. name: container.cpu.throttling_data.periods sum: @@ -794,6 +802,16 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1691752005571543000" + timeUnixNano: "1691752005573769000" + isMonotonic: true + unit: '{restarts}' - description: Time elapsed since container start time. gauge: dataPoints: diff --git a/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.yaml b/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.yaml index fa5d9a5a5285..c3a0788872fc 100644 --- a/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.yaml +++ b/receiver/dockerstatsreceiver/testdata/mock/two_containers/expected_metrics.yaml @@ -195,6 +195,13 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637112000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. 
name: container.cpu.throttling_data.periods sum: @@ -739,6 +746,15 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637112000" + isMonotonic: true + unit: "{restarts}" - description: Time elapsed since container start time. gauge: dataPoints: @@ -946,6 +962,13 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' + - description: CPU shares set for the container. + gauge: + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637093000" + name: container.cpu.shares + unit: "1" - description: Number of periods with throttling active. name: container.cpu.throttling_data.periods sum: @@ -1490,6 +1513,15 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: '{pids}' + - description: Number of restarts for the container. + name: container.restarts + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + timeUnixNano: "1657771832637093000" + isMonotonic: true + unit: "{restarts}" - description: Time elapsed since container start time. gauge: dataPoints:
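A minimal collector configuration exercising the three metrics covered by the fixtures above is sketched below. Only the metric names are taken from this change; the endpoint, collection interval, and the per-metric enabled toggles are illustrative assumptions following the receiver's standard generated-config layout, not values from this diff.

receivers:
  docker_stats:
    # Illustrative endpoint; point this at any reachable Docker daemon socket.
    endpoint: unix:///var/run/docker.sock
    # Illustrative interval; the receiver's default scraper settings also work.
    collection_interval: 10s
    metrics:
      container.cpu.limit:
        enabled: true
      container.cpu.shares:
        enabled: true
      container.restarts:
        enabled: true

Because container.restarts is emitted as a cumulative, monotonic sum (aggregationTemporality: 2, isMonotonic: true in the fixtures), backends can derive restart rates from it directly, while container.cpu.limit and container.cpu.shares are point-in-time gauges.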