fix: DiskState parsing error (banzaicloud#31)
chrisgacsal authored and amuraru committed Dec 12, 2023
1 parent e170504 commit 1732b6b
Showing 3 changed files with 58 additions and 5 deletions.
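The fix makes per-disk load information usable from KafkaClusterLoad responses: the integration test below now requests disk info explicitly and asserts that each broker's DiskState map is keyed by its log-dir paths. The following is a minimal, hypothetical usage sketch of that code path; the request fields, the KafkaClusterLoad call, resp.Failed(), Host, and DiskState are taken from the diff, while the local loadAPI interface, the response type name, and the Result.Brokers access are assumptions made for illustration.

package example

import (
	"context"
	"fmt"

	"github.com/banzaicloud/go-cruise-control/pkg/api"
)

// loadAPI is a local, illustrative interface covering the single endpoint used
// below; the client built by the integration test helpers is assumed to satisfy it.
type loadAPI interface {
	KafkaClusterLoad(ctx context.Context, req *api.KafkaClusterLoadRequest) (*api.KafkaClusterLoadResponse, error)
}

// printDiskState requests cluster load with per-disk information and prints the
// log dirs reported for each broker. PopulateDiskInfo is what makes Cruise Control
// include the DiskState data whose parsing this commit fixes.
func printDiskState(ctx context.Context, cc loadAPI) error {
	req := &api.KafkaClusterLoadRequest{
		AllowCapacityEstimation: true,
		PopulateDiskInfo:        true,
	}

	resp, err := cc.KafkaClusterLoad(ctx, req)
	if err != nil {
		return err
	}
	if resp.Failed() {
		return fmt.Errorf("kafka_cluster_load request failed")
	}

	for _, broker := range resp.Result.Brokers { // assumed response shape
		for logDir := range broker.DiskState { // e.g. /var/lib/kafka/data0
			fmt.Printf("%s: %s\n", broker.Host, logDir)
		}
	}
	return nil
}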
39 changes: 39 additions & 0 deletions deploy/cruisecontrol/capacityJBOD.json
@@ -1,5 +1,44 @@
{
"brokerCapacities":[
{
"brokerId": "0",
"capacity": {
"DISK": {
"/var/lib/kafka/data0": "100000",
"/var/lib/kafka/data1": "100000"
},
"CPU": "100",
"NW_IN": "10000",
"NW_OUT": "10000"
},
"doc": "The default capacity for a broker with multiple logDirs each on a separate heterogeneous disk."
},
{
"brokerId": "1",
"capacity": {
"DISK": {
"/var/lib/kafka/data0": "100000",
"/var/lib/kafka/data1": "100000"
},
"CPU": "100",
"NW_IN": "10000",
"NW_OUT": "10000"
},
"doc": "The default capacity for a broker with multiple logDirs each on a separate heterogeneous disk."
},
{
"brokerId": "2",
"capacity": {
"DISK": {
"/var/lib/kafka/data0": "100000",
"/var/lib/kafka/data1": "100000"
},
"CPU": "100",
"NW_IN": "10000",
"NW_OUT": "10000"
},
"doc": "The default capacity for a broker with multiple logDirs each on a separate heterogeneous disk."
},
{
"brokerId": "0",
"capacity": {
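For reference, the broker entries added above follow Cruise Control's JBOD capacity layout, in which DISK is a map from log-dir path to capacity in MB rather than a single value. Below is a small, hypothetical Go sketch of decoding such a file; the struct tags mirror the JSON keys shown above, while the types and the reading helper are illustrative and not part of this repository.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// brokerCapacity mirrors one entry of capacityJBOD.json as shown above.
// All values are strings in the file, including the per-disk capacities.
type brokerCapacity struct {
	BrokerID string `json:"brokerId"`
	Capacity struct {
		Disk  map[string]string `json:"DISK"`   // log-dir path -> capacity in MB
		CPU   string            `json:"CPU"`    // percentage
		NwIn  string            `json:"NW_IN"`  // KB/s
		NwOut string            `json:"NW_OUT"` // KB/s
	} `json:"capacity"`
	Doc string `json:"doc"`
}

type capacityFile struct {
	BrokerCapacities []brokerCapacity `json:"brokerCapacities"`
}

func main() {
	raw, err := os.ReadFile("deploy/cruisecontrol/capacityJBOD.json")
	if err != nil {
		log.Fatal(err)
	}
	var cf capacityFile
	if err := json.Unmarshal(raw, &cf); err != nil {
		log.Fatal(err)
	}
	for _, bc := range cf.BrokerCapacities {
		for dir, mb := range bc.Capacity.Disk {
			fmt.Printf("broker %s: %s -> %s MB\n", bc.BrokerID, dir, mb)
		}
	}
}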
20 changes: 17 additions & 3 deletions integration_test/kafka_cluster_load_test.go
@@ -17,10 +17,11 @@ limitations under the License.
package integration_test

import (
"github.com/banzaicloud/go-cruise-control/integration_test/helpers"
"github.com/banzaicloud/go-cruise-control/pkg/api"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/banzaicloud/go-cruise-control/integration_test/helpers"
"github.com/banzaicloud/go-cruise-control/pkg/api"
)

var _ = Describe("Kafka Cluster Load", Label("api:load", "api:state"), func() {
@@ -65,6 +66,11 @@ var _ = Describe("Kafka Cluster Load", Label("api:load", "api:state"), func() {
"rack-2",
},
}

expectedBrokerDisks = []string{
"/var/lib/kafka/data0",
"/var/lib/kafka/data1",
}
)

BeforeEach(func(ctx SpecContext) {
@@ -80,7 +86,10 @@
Context("for the last hour", func() {
It("should result no errors", func(ctx SpecContext) {
By("requesting load information")
req := api.KafkaClusterLoadRequestWithDefaults()
req := &api.KafkaClusterLoadRequest{
AllowCapacityEstimation: true,
PopulateDiskInfo: true,
}
resp, err := cruisecontrol.KafkaClusterLoad(ctx, req)
Expect(err).NotTo(HaveOccurred())
Expect(resp.Failed()).To(BeFalse())
@@ -94,6 +103,11 @@
Expect(ok).To(BeTrue())
Expect(broker.Host).To(Equal(expectedBroker.Host))
Expect(broker.Rack).To(Equal(expectedBroker.Rack))

for _, disk := range expectedBrokerDisks {
_, ok := broker.DiskState[disk]
Expect(ok).To(BeTrue())
}
}

By("getting host load statistics")
4 changes: 2 additions & 2 deletions makefile.d/50-deploy.mk
@@ -16,7 +16,7 @@ start: ## Spin up local development environment
up \
--detach \
--remove-orphans \
--timeout $(DOCKER_COMPOSE_TIMEOUT) \
-t $(DOCKER_COMPOSE_TIMEOUT) \
--wait

.PHONY: stop
@@ -28,4 +28,4 @@ stop: ## Stop local development environment
down \
--remove-orphans \
--volumes \
--timeout $(DOCKER_COMPOSE_TIMEOUT)
-t $(DOCKER_COMPOSE_TIMEOUT)
