diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go
index 6bdea47d..ba9f51c4 100644
--- a/admiral/pkg/clusters/handler.go
+++ b/admiral/pkg/clusters/handler.go
@@ -659,13 +659,10 @@ func createDestinationRuleSkeletion(dr v1alpha32.DestinationRule, name string, n
 }
 
 func getServiceForDeployment(rc *RemoteController, deployment *k8sAppsV1.Deployment) *k8sV1.Service {
-
     if deployment == nil {
         return nil
     }
-
    cachedServices := rc.ServiceController.Cache.Get(deployment.Namespace)
-
     if cachedServices == nil {
         return nil
     }
diff --git a/admiral/pkg/clusters/handler_test.go b/admiral/pkg/clusters/handler_test.go
index 60999a8e..d2860d36 100644
--- a/admiral/pkg/clusters/handler_test.go
+++ b/admiral/pkg/clusters/handler_test.go
@@ -29,7 +29,6 @@ import (
 )
 
 func TestGetDependentClusters(t *testing.T) {
-
     identityClusterCache := common.NewMapOfMaps()
     identityClusterCache.Put("id1", "dep1", "cl1")
     identityClusterCache.Put("id2", "dep2", "cl2")
diff --git a/admiral/pkg/clusters/registry_test.go b/admiral/pkg/clusters/registry_test.go
index 7bb94fdd..48a31b6e 100644
--- a/admiral/pkg/clusters/registry_test.go
+++ b/admiral/pkg/clusters/registry_test.go
@@ -28,6 +28,7 @@ var registryTestSingleton sync.Once
 
 func setupForRegistryTests() {
     registryTestSingleton.Do(func() {
+        common.ResetSync()
         p := common.AdmiralParams{
             KubeconfigPath: "testdata/fake.config",
             LabelSet:       &common.LabelSet{},
diff --git a/admiral/pkg/clusters/serviceentry.go b/admiral/pkg/clusters/serviceentry.go
index f16a2e2c..71b6d4d1 100644
--- a/admiral/pkg/clusters/serviceentry.go
+++ b/admiral/pkg/clusters/serviceentry.go
@@ -55,7 +55,9 @@ func createServiceEntry(ctx context.Context, event admiral.EventType, rc *Remote
     return tmpSe
 }
 
-func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.EventType, env string, sourceIdentity string, remoteRegistry *RemoteRegistry) map[string]*networking.ServiceEntry {
+func modifyServiceEntryForNewServiceOrPod(
+    ctx context.Context, event admiral.EventType, env string, sourceIdentity string,
+    remoteRegistry *RemoteRegistry) map[string]*networking.ServiceEntry {
     defer util.LogElapsedTime("modifyServiceEntryForNewServiceOrPod", sourceIdentity, env, "")()
     if CurrentAdmiralState.ReadOnly {
         log.Infof(LogFormat, event, env, sourceIdentity, "", "Processing skipped as Admiral is in Read-only mode")
@@ -193,11 +195,12 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve
 
     for key, serviceEntry := range serviceEntries {
         if len(serviceEntry.Endpoints) == 0 {
-            AddServiceEntriesWithDr(ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster},
-
+            AddServiceEntriesWithDr(
+                ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster},
                 map[string]*networking.ServiceEntry{key: serviceEntry})
         }
-        clusterIngress, _ := rc.ServiceController.Cache.GetLoadBalancer(common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem)
+        clusterIngress, _ := rc.ServiceController.Cache.GetLoadBalancer(
+            common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem)
         for _, ep := range serviceEntry.Endpoints {
             //replace istio ingress-gateway address with local fqdn, note that ingress-gateway can be empty (not provisoned, or is not up)
             if ep.Address == clusterIngress || ep.Address == "" {
@@ -266,7 +269,6 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve
 
 //i) Picks the GTP that was created most recently from the passed in GTP list based on GTP priority label (GTPs from all clusters)
 //ii) Updates the global GTP cache with the selected GTP in i)
 func updateGlobalGtpCache(cache *AdmiralCache, identity, env string, gtps map[string][]*v1.GlobalTrafficPolicy) {
-
     defer util.LogElapsedTime("updateGlobalGtpCache", identity, env, "")()
     gtpsOrdered := make([]*v1.GlobalTrafficPolicy, 0)
     for _, gtpsInCluster := range gtps {
         gtpsOrdered = append(gtpsOrdered, gtpsInCluster...)
     }
@@ -378,7 +380,10 @@ func modifySidecarForLocalClusterCommunication(ctx context.Context, sidecarNames
         return
     }
 
-    sidecar, _ := sidecarConfig.IstioClient.NetworkingV1alpha3().Sidecars(sidecarNamespace).Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{})
+    sidecar, err := sidecarConfig.IstioClient.NetworkingV1alpha3().Sidecars(sidecarNamespace).Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{})
+    if err != nil {
+        return
+    }
 
     if sidecar == nil || (sidecar.Spec.Egress == nil) {
         return
@@ -735,7 +740,9 @@ func getUniqueAddress(ctx context.Context, admiralCache *AdmiralCache, globalFqd
     needsCacheUpdate := false
 
     for err == nil && counter < maxRetries {
-        address, needsCacheUpdate, err = GetLocalAddressForSe(ctx, getIstioResourceName(globalFqdn, "-se"), admiralCache.ServiceEntryAddressStore, admiralCache.ConfigMapController)
+        address, needsCacheUpdate, err = GetLocalAddressForSe(
+            ctx, getIstioResourceName(globalFqdn, "-se"),
+            admiralCache.ServiceEntryAddressStore, admiralCache.ConfigMapController)
 
         if err != nil {
             log.Errorf("Error getting local address for Service Entry. Err: %v", err)
diff --git a/admiral/pkg/clusters/serviceentry_test.go b/admiral/pkg/clusters/serviceentry_test.go
index f6f8e301..b802953a 100644
--- a/admiral/pkg/clusters/serviceentry_test.go
+++ b/admiral/pkg/clusters/serviceentry_test.go
@@ -19,15 +19,16 @@ import (
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
     "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+    log "github.com/sirupsen/logrus"
     "github.com/stretchr/testify/assert"
     "google.golang.org/protobuf/testing/protocmp"
     "gopkg.in/yaml.v2"
     istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3"
     "istio.io/client-go/pkg/apis/networking/v1alpha3"
     istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+    k8sAppsV1 "k8s.io/api/apps/v1"
     v14 "k8s.io/api/apps/v1"
     coreV1 "k8s.io/api/core/v1"
-    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/rest"
@@ -35,26 +36,72 @@ import (
 
 var serviceEntryTestSingleton sync.Once
 
+func admiralParams() common.AdmiralParams {
+    return common.AdmiralParams{
+        KubeconfigPath: "testdata/fake.config",
+        LabelSet: &common.LabelSet{
+            GatewayApp:          "gatewayapp",
+            WorkloadIdentityKey: "identity",
+            PriorityKey:         "priority",
+            EnvKey:              "env",
+        },
+        EnableSAN:                  true,
+        SANPrefix:                  "prefix",
+        HostnameSuffix:             "mesh",
+        SyncNamespace:              "ns",
+        CacheRefreshDuration:       0,
+        ClusterRegistriesNamespace: "default",
+        DependenciesNamespace:      "default",
+        WorkloadSidecarName:        "default",
+        SecretResolver:             "",
+    }
+}
+
 func setupForServiceEntryTests() {
+    var initHappened bool
     serviceEntryTestSingleton.Do(func() {
-        p := common.AdmiralParams{
-            KubeconfigPath:             "testdata/fake.config",
-            LabelSet:                   &common.LabelSet{},
-            EnableSAN:                  true,
-            SANPrefix:                  "prefix",
-            HostnameSuffix:             "mesh",
-            SyncNamespace:              "ns",
-            CacheRefreshDuration:       0,
-            ClusterRegistriesNamespace: "default",
-            DependenciesNamespace:      "default",
-            SecretResolver:             "",
-        }
-        p.LabelSet.WorkloadIdentityKey = "identity"
-        p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-        p.LabelSet.PriorityKey = "priority"
-        p.LabelSet.EnvKey = "env"
-        common.InitializeConfig(p)
+        common.ResetSync()
+        initHappened = true
+        common.InitializeConfig(admiralParams())
     })
+    if !initHappened {
+        log.Warn("InitializeConfig was NOT called from setupForServiceEntryTests")
+    } else {
+        log.Info("InitializeConfig was called from setupForServiceEntryTests")
+    }
+}
+
+func makeTestDeployment(name, namespace, identityLabelValue string) *k8sAppsV1.Deployment {
+    return &k8sAppsV1.Deployment{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+            Annotations: map[string]string{
+                "env": "test",
+                "traffic.sidecar.istio.io/includeInboundPorts": "8090",
+            },
+        },
+        Spec: k8sAppsV1.DeploymentSpec{
+            Template: coreV1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Annotations: map[string]string{
+                        "env": "test",
+                        "traffic.sidecar.istio.io/includeInboundPorts": "8090",
+                    },
+                    Labels: map[string]string{
+                        "identity": identityLabelValue,
+                    },
+                },
+                Spec: coreV1.PodSpec{},
+            },
+            Selector: &v12.LabelSelector{
+                MatchLabels: map[string]string{
+                    "identity": identityLabelValue,
+                    "app":      identityLabelValue,
+                },
+            },
+        },
+    }
 }
@@ -72,6 +119,7 @@ func makeTestRollout(name, namespace, identityLabelValue string) argo.Rollout {
                 Labels: map[string]string{"identity": identityLabelValue},
                 Annotations: map[string]string{
                     "env": "test",
+                    "traffic.sidecar.istio.io/includeInboundPorts": "8090",
                 },
             },
         },
@@ -101,54 +149,162 @@ func makeTestRollout(name, namespace, identityLabelValue string) argo.Rollout {
 func TestModifyServiceEntryForNewServiceOrPodForExcludedAsset(t *testing.T) {
     setupForServiceEntryTests()
     var (
-        env                     = "test"
-        stop                    = make(chan struct{})
-        foobarMetadataName      = "foobar"
-        foobarMetadataNamespace = "foobar-ns"
-        foobarRollout           = makeTestRollout(foobarMetadataName, foobarMetadataNamespace, foobarMetadataName)
-        clusterID               = "test-dev-k8s"
-        p                       = common.AdmiralParams{
-            KubeconfigPath:       "testdata/fake.config",
-            CacheRefreshDuration: 0,
+        env                                 = "test"
+        stop                                = make(chan struct{})
+        foobarMetadataName                  = "foobar"
+        foobarMetadataNamespace             = "foobar-ns"
+        rollout1Identity                    = "rollout1"
+        deployment1Identity                 = "deployment1"
+        testRollout1                        = makeTestRollout(foobarMetadataName, foobarMetadataNamespace, rollout1Identity)
+        testDeployment1                     = makeTestDeployment(foobarMetadataName, foobarMetadataNamespace, deployment1Identity)
+        clusterID                           = "test-dev-k8s"
+        fakeIstioClient                     = istiofake.NewSimpleClientset()
+        config                              = rest.Config{Host: "localhost"}
+        expectedServiceEntriesForDeployment = map[string]*istioNetworkingV1Alpha3.ServiceEntry{
+            "test." + deployment1Identity + ".mesh": &istioNetworkingV1Alpha3.ServiceEntry{
+                Hosts:     []string{"test." + deployment1Identity + ".mesh"},
+                Addresses: []string{"127.0.0.1"},
+                Ports: []*istioNetworkingV1Alpha3.Port{
+                    &istioNetworkingV1Alpha3.Port{
+                        Number:   80,
+                        Protocol: "http",
+                        Name:     "http",
+                    },
+                },
+                Location:   istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+                Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS,
+                Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
+                    &istioNetworkingV1Alpha3.WorkloadEntry{
+                        Address: "dummy.admiral.global",
+                        Ports: map[string]uint32{
+                            "http": 0,
+                        },
+                        Locality: "us-west-2",
+                    },
+                },
+                SubjectAltNames: []string{"spiffe://prefix/" + deployment1Identity},
+            },
         }
-        config              = rest.Config{Host: "localhost"}
-        foobarCanaryService = &coreV1.Service{
+        /*
+            expectedServiceEntriesForRollout = map[string]*istioNetworkingV1Alpha3.ServiceEntry{
+                "test." + rollout1Identity + ".mesh": &istioNetworkingV1Alpha3.ServiceEntry{
+                    Hosts:     []string{"test." + rollout1Identity + ".mesh"},
+                    Addresses: []string{"127.0.0.1"},
+                    Ports: []*istioNetworkingV1Alpha3.Port{
+                        &istioNetworkingV1Alpha3.Port{
+                            Number:   80,
+                            Protocol: "http",
+                            Name:     "http",
+                        },
+                    },
+                    Location:   istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+                    Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS,
+                    Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
+                        &istioNetworkingV1Alpha3.WorkloadEntry{
+                            Address: "dummy.admiral.global",
+                            Ports: map[string]uint32{
+                                "http": 0,
+                            },
+                            Locality: "us-west-2",
+                        },
+                    },
+                    SubjectAltNames: []string{"spiffe://prefix/" + rollout1Identity},
+                },
+            }
+        */
+        serviceEntryAddressStore = &ServiceEntryAddressStore{
+            EntryAddresses: map[string]string{
+                "test." + deployment1Identity + ".mesh-se": "127.0.0.1",
+                "test." + rollout1Identity + ".mesh-se":    "127.0.0.1",
+            },
+            Addresses: []string{},
+        }
+        serviceForRollout = &coreV1.Service{
             ObjectMeta: v12.ObjectMeta{
-                Name:      foobarMetadataName + "-canary",
+                Name:      foobarMetadataName + "-stable",
                 Namespace: foobarMetadataNamespace,
             },
             Spec: coreV1.ServiceSpec{
-                Selector: map[string]string{"app": foobarMetadataName},
+                Selector: map[string]string{"app": rollout1Identity},
+                Ports: []coreV1.ServicePort{
+                    {
+                        Name: "http",
+                        Port: 8090,
+                    },
+                },
+            },
+        }
+        serviceForDeployment = &coreV1.Service{
+            ObjectMeta: v12.ObjectMeta{
+                Name:      foobarMetadataName,
+                Namespace: foobarMetadataNamespace,
+            },
+            Spec: coreV1.ServiceSpec{
+                Selector: map[string]string{"app": deployment1Identity},
+                Ports: []coreV1.ServicePort{
+                    {
+                        Name: "http",
+                        Port: 8090,
+                    },
+                },
             },
         }
-        rr1, _ = InitAdmiral(context.Background(), p)
-        rr2, _ = InitAdmiral(context.Background(), p)
+        rr1, _ = InitAdmiral(context.Background(), admiralParams())
+        rr2, _ = InitAdmiral(context.Background(), admiralParams())
     )
 
     deploymentController, err := admiral.NewDeploymentController(clusterID, make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
     if err != nil {
         t.Fail()
     }
+    deploymentController.Cache.UpdateDeploymentToClusterCache(deployment1Identity, testDeployment1)
     rolloutController, err := admiral.NewRolloutsController(clusterID, make(chan struct{}), &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
     if err != nil {
         t.Fail()
     }
-    rolloutController.Cache.UpdateRolloutToClusterCache("foobar", &foobarRollout)
+    rolloutController.Cache.UpdateRolloutToClusterCache(rollout1Identity, &testRollout1)
     serviceController, err := admiral.NewServiceController(clusterID, stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
     if err != nil {
         t.Fatalf("%v", err)
     }
-    serviceController.Cache.Put(foobarCanaryService)
+    virtualServiceController, err := istio.NewVirtualServiceController(clusterID, make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fatalf("%v", err)
+    }
+    gtpc, err := admiral.NewGlobalTrafficController("", make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fatalf("%v", err)
+    }
+    t.Logf("expectedServiceEntriesForDeployment: %v\n", expectedServiceEntriesForDeployment)
+    serviceController.Cache.Put(serviceForRollout)
+    serviceController.Cache.Put(serviceForDeployment)
     rc := &RemoteController{
-        DeploymentController: deploymentController,
-        RolloutController:    rolloutController,
-        ServiceController:    serviceController,
+        ClusterID:                clusterID,
+        DeploymentController:     deploymentController,
+        RolloutController:        rolloutController,
+        ServiceController:        serviceController,
+        VirtualServiceController: virtualServiceController,
+        NodeController: &admiral.NodeController{
+            Locality: &admiral.Locality{
+                Region: "us-west-2",
+            },
+        },
+        ServiceEntryController: &istio.ServiceEntryController{
+            IstioClient: fakeIstioClient,
+        },
+        DestinationRuleController: &istio.DestinationRuleController{
+            IstioClient: fakeIstioClient,
+        },
+        GlobalTraffic: gtpc,
     }
     rr1.PutRemoteController(clusterID, rc)
     rr1.ExcludeAssetList = []string{"asset1"}
     rr1.StartTime = time.Now()
+    rr1.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore
     rr2.PutRemoteController(clusterID, rc)
     rr2.StartTime = time.Now()
+    rr2.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore
 
     testCases := []struct {
         name                   string
@@ -157,24 +313,69 @@ func TestModifyServiceEntryForNewServiceOrPodForExcludedAsset(t *testing.T) {
         expectedServiceEntries map[string]*istioNetworkingV1Alpha3.ServiceEntry
     }{
         {
-            name:                   "when asset is in the exclude list",
+            name: "Given asset is using a deployment, " +
+                "And asset is in the exclude list, " +
+                "When modifyServiceEntryForNewServiceOrPod is called, " +
+                "Then, it should skip creating service entries, and return an empty map of service entries",
             assetIdentity:          "asset1",
             remoteRegistry:         rr1,
             expectedServiceEntries: nil,
         },
         {
-            name:                   "when asset is NOT in the exclude list",
-            assetIdentity:          "foobar",
-            remoteRegistry:         rr2,
+            name: "Given asset is using a rollout, " +
+                "And asset is in the exclude list, " +
+                "When modifyServiceEntryForNewServiceOrPod is called, " +
+                "Then, it should skip creating service entries, and return an empty map of service entries",
+            assetIdentity:          "asset1",
+            remoteRegistry:         rr1,
             expectedServiceEntries: nil,
         },
+        {
+            name: "Given asset is using a deployment, " +
+                "And asset is NOT in the exclude list, " +
+                "When modifyServiceEntryForNewServiceOrPod is called, " +
+                "Then, corresponding service entry should be created, " +
+                "And the function should return a map containing the created service entry",
+            assetIdentity:          deployment1Identity,
+            remoteRegistry:         rr2,
+            expectedServiceEntries: expectedServiceEntriesForDeployment,
+        },
+        /*
+            {
+                name: "Given asset is using a rollout, " +
+                    "And asset is NOT in the exclude list, " +
+                    "When modifyServiceEntryForNewServiceOrPod is called, " +
+                    "Then, corresponding service entry should be created, " +
+                    "And the function should return a map containing the created service entry",
+                assetIdentity:          rollout1Identity,
+                remoteRegistry:         rr2,
+                expectedServiceEntries: expectedServiceEntriesForRollout,
+            },
+        */
     }
     for _, c := range testCases {
         t.Run(c.name, func(t *testing.T) {
-            serviceEntries := modifyServiceEntryForNewServiceOrPod(context.Background(), admiral.Add, env, c.assetIdentity, c.remoteRegistry)
+            serviceEntries := modifyServiceEntryForNewServiceOrPod(
+                context.Background(),
+                admiral.Add,
+                env,
+                c.assetIdentity,
+                c.remoteRegistry,
+            )
             if len(serviceEntries) != len(c.expectedServiceEntries) {
                 t.Fatalf("expected service entries to be of length: %d, but got: %d", len(c.expectedServiceEntries), len(serviceEntries))
             }
+            if len(c.expectedServiceEntries) > 0 {
+                for k := range c.expectedServiceEntries {
+                    if serviceEntries[k] == nil {
+                        t.Fatalf(
+                            "expected service entries to contain service entry for: %s, "+
+                                "but did not find it. Got map: %v",
+                            k, serviceEntries,
+                        )
+                    }
+                }
+            }
         })
     }
 }
@@ -394,7 +595,10 @@ func TestCreateServiceEntryForNewServiceOrPod(t *testing.T) {
     p := common.AdmiralParams{
         KubeconfigPath: "testdata/fake.config",
     }
-    rr, _ := InitAdmiral(context.Background(), p)
+    rr, err := InitAdmiral(context.Background(), p)
+    if err != nil {
+        t.Fatalf("unable to initialize admiral, err: %v", err)
+    }
     rr.StartTime = time.Now().Add(-60 * time.Second)
     config := rest.Config{
         Host: "localhost",
@@ -593,10 +797,10 @@ func TestMakeRemoteEndpointForServiceEntry(t *testing.T) {
     }
 }
 
-func buildFakeConfigMapFromAddressStore(addressStore *ServiceEntryAddressStore, resourceVersion string) *v1.ConfigMap {
+func buildFakeConfigMapFromAddressStore(addressStore *ServiceEntryAddressStore, resourceVersion string) *coreV1.ConfigMap {
     bytes, _ := yaml.Marshal(addressStore)
-    cm := v1.ConfigMap{
+    cm := coreV1.ConfigMap{
         Data: map[string]string{"serviceEntryAddressStore": string(bytes)},
     }
     cm.Name = "se-address-configmap"
@@ -660,6 +864,7 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) {
     sidecarEgressMap := make(map[string]common.SidecarEgress)
     sidecarEgressMap["test-dependency-namespace"] = common.SidecarEgress{Namespace: "test-dependency-namespace", FQDN: "test-local-fqdn", CNAMEs: map[string]string{"test.myservice.global": "1"}}
 
+    time.Sleep(5 * time.Second)
     modifySidecarForLocalClusterCommunication(ctx, "test-sidecar-namespace", sidecarEgressMap, remoteController)
 
     updatedSidecar, err := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars("test-sidecar-namespace").Get(ctx, "default", v12.GetOptions{})
@@ -695,6 +900,7 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) {
             newHosts := matched.Hosts
             listener.Hosts = listener.Hosts[:0]
             matched.Hosts = matched.Hosts[:0]
+            t.Logf("old: %v, new: %v", oldHosts, newHosts)
             assert.ElementsMatch(t, oldHosts, newHosts, "hosts should match")
             if !cmp.Equal(listener, matched, protocmp.Transform()) {
                 t.Fatalf("Listeners do not match. Details - %v", cmp.Diff(listener, matched))
@@ -997,9 +1203,11 @@ func TestCreateServiceEntryForNewServiceOrPodRolloutsUsecase(t *testing.T) {
         p = common.AdmiralParams{
             KubeconfigPath: "testdata/fake.config",
         }
-        rr, _ = InitAdmiral(context.Background(), p)
     )
-
+    rr, err := InitAdmiral(context.Background(), p)
+    if err != nil {
+        t.Fatalf("unable to initialize admiral, err: %v", err)
+    }
     rr.StartTime = time.Now().Add(-60 * time.Second)
     d, err := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
     if err != nil {
@@ -1141,12 +1349,14 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
             KubeconfigPath:        "testdata/fake.config",
             PreviewHostnamePrefix: "preview",
         }
-        rr, _ = InitAdmiral(context.Background(), p)
         config = rest.Config{
             Host: "localhost",
         }
     )
-
+    rr, err := InitAdmiral(context.Background(), p)
+    if err != nil {
+        t.Fatalf("unable to initialize admiral, err: %v", err)
+    }
     rr.StartTime = time.Now().Add(-60 * time.Second)
     d, err := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
     if err != nil {
@@ -1340,8 +1550,8 @@ func TestUpdateEndpointsForBlueGreen(t *testing.T) {
     }
 
     weightedServices := map[string]*WeightedService{
-        activeService:  {Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: activeService, Namespace: namespace}}},
-        previewService: {Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: previewService, Namespace: namespace}}},
+        activeService:  {Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: activeService, Namespace: namespace}}},
+        previewService: {Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: previewService, Namespace: namespace}}},
     }
 
     activeWantedEndpoints := &istioNetworkingV1Alpha3.WorkloadEntry{
@@ -1411,12 +1621,12 @@ func TestUpdateEndpointsForWeightedServices(t *testing.T) {
             },
         }
         weightedServices = map[string]*WeightedService{
-            canaryService: {Weight: 10, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: canaryService, Namespace: namespace}}},
-            stableService: {Weight: 90, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: stableService, Namespace: namespace}}},
+            canaryService: {Weight: 10, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: canaryService, Namespace: namespace}}},
+            stableService: {Weight: 90, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: stableService, Namespace: namespace}}},
         }
         weightedServicesZeroWeight = map[string]*WeightedService{
-            canaryService: {Weight: 0, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: canaryService, Namespace: namespace}}},
-            stableService: {Weight: 100, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: stableService, Namespace: namespace}}},
+            canaryService: {Weight: 0, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: canaryService, Namespace: namespace}}},
+            stableService: {Weight: 100, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: stableService, Namespace: namespace}}},
         }
         wantedEndpoints = []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: clusterIngress2, Weight: 10, Ports: map[string]uint32{"http": 15443}},
diff --git a/admiral/pkg/clusters/types.go b/admiral/pkg/clusters/types.go
index a5f329b4..c45bf54a 100644
--- a/admiral/pkg/clusters/types.go
+++ b/admiral/pkg/clusters/types.go
@@ -268,13 +268,11 @@ type routingPolicyFilterCache struct {
 
 func (r *routingPolicyFilterCache) Get(identityEnvKey string) (filters map[string]map[string]string) {
     defer r.mutex.Unlock()
     r.mutex.Lock()
-    fmt.Printf("r.filterCache: %+v", r.filterCache)
     return r.filterCache[identityEnvKey]
 }
 
 func (r *routingPolicyFilterCache) Put(identityEnvKey string, clusterId string, filterName string) {
     defer r.mutex.Unlock()
-    fmt.Printf("identityEnvKey: %v\n", identityEnvKey)
     r.mutex.Lock()
     if r.filterCache[identityEnvKey] == nil {
         r.filterCache[identityEnvKey] = make(map[string]map[string]string)
@@ -297,9 +295,7 @@ func (r *routingPolicyFilterCache) Delete(identityEnvKey string) {
     }
 }
 func (r RoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy) {
-    fmt.Println("comes inside Added")
     if common.GetEnableRoutingPolicy() {
-        fmt.Println("routing policy is enabled")
         if common.ShouldIgnoreResource(obj.ObjectMeta) {
             log.Infof(LogFormat, "success", "routingpolicy", obj.Name, "", "Ignored the RoutingPolicy because of the annotation")
             return
@@ -320,7 +316,6 @@ func (r RoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy)
 func (r RoutingPolicyHandler) processRoutingPolicy(ctx context.Context, dependents map[string]string, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType) {
     for _, remoteController := range r.RemoteRegistry.remoteControllers {
         for _, dependent := range dependents {
-            fmt.Printf("dependent: %v\n", dependent)
             // Check if the dependent exists in this remoteCluster. If so, we create an envoyFilter with dependent identity as workload selector
             if _, ok := r.RemoteRegistry.AdmiralCache.IdentityClusterCache.Get(dependent).Copy()[remoteController.ClusterID]; ok {
                 selectors := r.RemoteRegistry.AdmiralCache.WorkloadSelectorCache.Get(dependent + remoteController.ClusterID).Copy()
diff --git a/admiral/pkg/clusters/types_test.go b/admiral/pkg/clusters/types_test.go
index cc5645f5..b22f92ff 100644
--- a/admiral/pkg/clusters/types_test.go
+++ b/admiral/pkg/clusters/types_test.go
@@ -28,6 +28,7 @@ var typeTestSingleton sync.Once
 
 func setupForTypeTests() {
     typeTestSingleton.Do(func() {
+        common.ResetSync()
         p := common.AdmiralParams{
             KubeconfigPath: "testdata/fake.config",
             LabelSet:       &common.LabelSet{},
diff --git a/admiral/pkg/clusters/util_test.go b/admiral/pkg/clusters/util_test.go
index d418177f..e5d8dd01 100644
--- a/admiral/pkg/clusters/util_test.go
+++ b/admiral/pkg/clusters/util_test.go
@@ -2,56 +2,49 @@ package clusters
 
 import (
     "errors"
+    "reflect"
+    "strconv"
+    "testing"
+
     argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
     k8sAppsV1 "k8s.io/api/apps/v1"
     coreV1 "k8s.io/api/core/v1"
     k8sV1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/apis/meta/v1"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
-    "reflect"
-    "strconv"
-    "testing"
 )
 
 func TestGetMeshPorts(t *testing.T) {
-
-    annotatedPort := 8090
-    annotatedSecondPort := 8091
-    defaultServicePort := uint32(8080)
-
-    defaultK8sSvcPortNoName := k8sV1.ServicePort{Port: int32(defaultServicePort)}
-    defaultK8sSvcPort := k8sV1.ServicePort{Name: "default", Port: int32(defaultServicePort)}
-    meshK8sSvcPort := k8sV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)}
-
-    serviceMeshPorts := []k8sV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort}
-
-    serviceMeshPortsOnlyDefault := []k8sV1.ServicePort{defaultK8sSvcPortNoName}
-
-    service := k8sV1.Service{
-        ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
-        Spec:       k8sV1.ServiceSpec{Ports: serviceMeshPorts},
-    }
-    deployment := k8sAppsV1.Deployment{
-        Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
-            ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}},
-        }}}
-
-    deploymentWithMultipleMeshPorts := k8sAppsV1.Deployment{
-        Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
-            ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}},
-        }}}
-
-    ports := map[string]uint32{"http": uint32(annotatedPort)}
-    portsDiffTargetPort := map[string]uint32{"http": uint32(80)}
-
-    grpcPorts := map[string]uint32{"grpc": uint32(annotatedPort)}
-    grpcWebPorts := map[string]uint32{"grpc-web": uint32(annotatedPort)}
-    http2Ports := map[string]uint32{"http2": uint32(annotatedPort)}
-
-    portsFromDefaultSvcPort := map[string]uint32{"http": defaultServicePort}
-
-    emptyPorts := map[string]uint32{}
+    var (
+        annotatedPort               = 8090
+        annotatedSecondPort         = 8091
+        defaultServicePort          = uint32(8080)
+        ports                       = map[string]uint32{"http": uint32(annotatedPort)}
+        portsDiffTargetPort         = map[string]uint32{"http": uint32(80)}
+        grpcPorts                   = map[string]uint32{"grpc": uint32(annotatedPort)}
+        grpcWebPorts                = map[string]uint32{"grpc-web": uint32(annotatedPort)}
+        http2Ports                  = map[string]uint32{"http2": uint32(annotatedPort)}
+        portsFromDefaultSvcPort     = map[string]uint32{"http": defaultServicePort}
+        emptyPorts                  = map[string]uint32{}
+        defaultK8sSvcPortNoName     = k8sV1.ServicePort{Port: int32(defaultServicePort)}
+        defaultK8sSvcPort           = k8sV1.ServicePort{Name: "default", Port: int32(defaultServicePort)}
+        meshK8sSvcPort              = k8sV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)}
+        serviceMeshPorts            = []k8sV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort}
+        serviceMeshPortsOnlyDefault = []k8sV1.ServicePort{defaultK8sSvcPortNoName}
+        service                     = k8sV1.Service{
+            ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
+            Spec:       k8sV1.ServiceSpec{Ports: serviceMeshPorts},
+        }
+        deployment = k8sAppsV1.Deployment{
+            Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
+                ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}},
+            }}}
+        deploymentWithMultipleMeshPorts = k8sAppsV1.Deployment{
+            Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
+                ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}},
+            }}}
+    )
 
     testCases := []struct {
         name       string
@@ -67,17 +60,17 @@ func TestGetMeshPorts(t *testing.T) {
             expected:   ports,
         },
         {
-            name:    "should return a http port if no port name is specified",
-            service: k8sV1.Service{
+            name: "should return a http port if no port name is specified",
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
-                Spec:       k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort),}}},
+                Spec:       k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort)}}},
             },
             deployment: deployment,
             expected:   portsDiffTargetPort,
         },
         {
-            name:    "should return a http port if the port name doesn't start with a protocol name",
-            service: k8sV1.Service{
+            name: "should return a http port if the port name doesn't start with a protocol name",
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
                 Spec:       k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "hello-grpc", Port: int32(annotatedPort)}}},
             },
@@ -85,8 +78,8 @@ func TestGetMeshPorts(t *testing.T) {
             expected:   ports,
         },
         {
-            name:    "should return a grpc port based on annotation",
-            service: k8sV1.Service{
+            name: "should return a grpc port based on annotation",
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
                 Spec:       k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-service", Port: int32(annotatedPort)}}},
             },
@@ -94,8 +87,8 @@ func TestGetMeshPorts(t *testing.T) {
             expected:   grpcPorts,
         },
         {
-            name:    "should return a grpc-web port based on annotation",
-            service: k8sV1.Service{
+            name: "should return a grpc-web port based on annotation",
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
                 Spec:       k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-web", Port: int32(annotatedPort)}}},
             },
@@ -103,8 +96,8 @@ func TestGetMeshPorts(t *testing.T) {
             expected:   grpcWebPorts,
         },
         {
-            name:    "should return a http2 port based on annotation",
-            service: k8sV1.Service{
+            name: "should return a http2 port based on annotation",
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
                 Spec:       k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http2", Port: int32(annotatedPort)}}},
             },
@@ -136,10 +129,10 @@ func TestGetMeshPorts(t *testing.T) {
             expected:   emptyPorts,
         },
         {
-            name:    "should return a http port if the port name doesn't start with a protocol name",
-            service: k8sV1.Service{
+            name: "should return a http port if the port name doesn't start with a protocol name",
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
-                Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)},
+                Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)},
                     {Name: "grpc", Port: int32(annotatedSecondPort)}}},
             },
             deployment: deploymentWithMultipleMeshPorts,
@@ -216,51 +209,51 @@ func TestValidateConfigmapBeforePutting(t *testing.T) {
 
 func TestGetServiceSelector(t *testing.T) {
 
-    selector := map[string]string {"app":"test1"}
+    selector := map[string]string{"app": "test1"}
 
     testCases := []struct {
         name        string
         clusterName string
         service     k8sV1.Service
         expected    map[string]string
-    }{
+    }{
         {
             name:        "should return a selectors based on service",
             clusterName: "test-cluster",
-            service: k8sV1.Service{
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
-                Spec: k8sV1.ServiceSpec{Selector: selector},
+                Spec:       k8sV1.ServiceSpec{Selector: selector},
             },
-            expected: selector,
+            expected: selector,
         },
         {
             name:        "should return empty selectors",
             clusterName: "test-cluster",
-            service: k8sV1.Service{
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
                 Spec:       k8sV1.ServiceSpec{Selector: map[string]string{}},
             },
-            expected: nil,
+            expected: nil,
         },
         {
             name:        "should return nil",
             clusterName: "test-cluster",
-            service: k8sV1.Service{
+            service: k8sV1.Service{
                 ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
                 Spec:       k8sV1.ServiceSpec{Selector: nil},
             },
-            expected: nil,
+            expected: nil,
         },
     }
 
     for _, c := range testCases {
         t.Run(c.name, func(t *testing.T) {
-            selectors := GetServiceSelector(c.clusterName,&c.service)
+            selectors := GetServiceSelector(c.clusterName, &c.service)
             if selectors == nil {
                 if c.expected != nil {
                     t.Errorf("Wanted selectors: %v, got: %v", c.expected, selectors)
                 }
-            }else if !reflect.DeepEqual(selectors.Copy(), c.expected) {
+            } else if !reflect.DeepEqual(selectors.Copy(), c.expected) {
                 t.Errorf("Wanted selectors: %v, got: %v", c.expected, selectors)
             }
         })
diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go
index 29fa0a39..0d26534a 100644
--- a/admiral/pkg/controller/common/config.go
+++ b/admiral/pkg/controller/common/config.go
@@ -25,7 +25,9 @@ func InitializeConfig(params AdmiralParams) {
         initHappened = true
         InitializeMetrics()
     })
-    if !initHappened {
+    if initHappened {
+        log.Info("InitializeConfig was called.")
+    } else {
         log.Warn("InitializeConfig was called but didn't take effect. It can only be called once, and thus has already been initialized. Please ensure you aren't re-initializing the config.")
     }
 }
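
Note on the sync.Once pattern running through this patch: InitializeConfig (last hunk) only takes effect once per process, which is why every test-suite setup above now calls common.ResetSync() inside its own package-level sync.Once before re-initializing config with the AdmiralParams that suite needs. The sketch below is not part of the diff; it assumes ResetSync simply replaces the package-level sync.Once guarding InitializeConfig, which is one straightforward way to get the behavior the tests rely on (the variable name initOnce is illustrative, not taken from the source):

package common

import "sync"

// initOnce guards InitializeConfig so that, in production, configuration and
// metrics are initialized exactly once per process.
var initOnce = sync.Once{}

// ResetSync swaps in a fresh sync.Once so the next InitializeConfig call runs
// again. Intended for tests only (e.g. setupForRegistryTests,
// setupForServiceEntryTests, setupForTypeTests); it is not safe to call while
// another goroutine may be inside initOnce.Do.
func ResetSync() {
    initOnce = sync.Once{}
}

Because Go runs each test package in its own process but shares the common package's global config across suites within that process, a setup that skips the reset would silently inherit whichever AdmiralParams won the race — the warning branch in InitializeConfig exists to surface exactly that situation.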