From 29543c6001394483f2d4a3e9e690cecf483c79b1 Mon Sep 17 00:00:00 2001 From: Anubhav Aeron Date: Fri, 26 Aug 2022 09:17:27 -0700 Subject: [PATCH 1/3] log error, instead of returning when vs deletion fails Signed-off-by: Anubhav Aeron --- admiral/pkg/clusters/handler.go | 197 +++++++++++----------- admiral/pkg/clusters/serviceentry.go | 86 ++++------ admiral/pkg/clusters/serviceentry_test.go | 22 ++- 3 files changed, 143 insertions(+), 162 deletions(-) diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go index 6bdea47d..36e37f23 100644 --- a/admiral/pkg/clusters/handler.go +++ b/admiral/pkg/clusters/handler.go @@ -8,21 +8,23 @@ import ( "strings" "time" - argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/wrappers" - "github.com/google/go-cmp/cmp" "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model" v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util" + + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/go-cmp/cmp" log "github.com/sirupsen/logrus" "google.golang.org/protobuf/testing/protocmp" v1alpha32 "istio.io/api/networking/v1alpha3" "istio.io/client-go/pkg/apis/networking/v1alpha3" k8sAppsV1 "k8s.io/api/apps/v1" k8sV1 "k8s.io/api/core/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" v12 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -32,26 +34,35 @@ const ( DefaultInterval int64 = 60 ) +// ServiceEntryHandler responsible for handling Add/Update/Delete events for +// ServiceEntry resources type ServiceEntryHandler struct { RemoteRegistry *RemoteRegistry ClusterID string } +// 
DestinationRuleHandler responsible for handling Add/Update/Delete events for +// DestinationRule resources type DestinationRuleHandler struct { RemoteRegistry *RemoteRegistry ClusterID string } +// VirtualServiceHandler responsible for handling Add/Update/Delete events for +// VirtualService resources type VirtualServiceHandler struct { RemoteRegistry *RemoteRegistry ClusterID string } +// SidecarHandler responsible for handling Add/Update/Delete events for +// Sidecar resources type SidecarHandler struct { RemoteRegistry *RemoteRegistry ClusterID string } +// WeightedService utility to store weighted services for argo rollouts type WeightedService struct { Weight int32 Service *k8sV1.Service @@ -69,10 +80,13 @@ func getIstioResourceName(host string, suffix string) string { } func getDestinationRule(se *v1alpha32.ServiceEntry, locality string, gtpTrafficPolicy *model.TrafficPolicy) *v1alpha32.DestinationRule { - var dr = &v1alpha32.DestinationRule{} + var ( + processGtp = true + dr = &v1alpha32.DestinationRule{} + ) dr.Host = se.Hosts[0] dr.TrafficPolicy = &v1alpha32.TrafficPolicy{Tls: &v1alpha32.ClientTLSSettings{Mode: v1alpha32.ClientTLSSettings_ISTIO_MUTUAL}} - processGtp := true + if len(locality) == 0 { log.Warnf(LogErrFormat, "Process", "GlobalTrafficPolicy", dr.Host, "", "Skipping gtp processing, locality of the cluster nodes cannot be determined. 
Is this minikube?") processGtp = false @@ -84,7 +98,6 @@ func getDestinationRule(se *v1alpha32.ServiceEntry, locality string, gtpTrafficP if len(gtpTrafficPolicy.Target) > 0 { var localityLbSettings = &v1alpha32.LocalityLoadBalancerSetting{} - if gtpTrafficPolicy.LbType == model.TrafficPolicy_FAILOVER { distribute := make([]*v1alpha32.LocalityLoadBalancerSetting_Distribute, 0) targetTrafficMap := make(map[string]uint32) @@ -110,7 +123,6 @@ func getDestinationRule(se *v1alpha32.ServiceEntry, locality string, gtpTrafficP } func getOutlierDetection(se *v1alpha32.ServiceEntry, locality string, gtpTrafficPolicy *model.TrafficPolicy) *v1alpha32.OutlierDetection { - outlierDetection := &v1alpha32.OutlierDetection{ BaseEjectionTime: &duration.Duration{Seconds: DefaultBaseEjectionTime}, ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors}, @@ -271,7 +283,6 @@ func (dh *SidecarHandler) Updated(ctx context.Context, obj *v1alpha3.Sidecar) {} func (dh *SidecarHandler) Deleted(ctx context.Context, obj *v1alpha3.Sidecar) {} func IgnoreIstioResource(exportTo []string, annotations map[string]string, namespace string) bool { - if len(annotations) > 0 && annotations[common.AdmiralIgnoreAnnotation] == "true" { return true } @@ -293,44 +304,35 @@ func IgnoreIstioResource(exportTo []string, annotations map[string]string, names } func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRule, dh *DestinationRuleHandler, event common.Event, resourceType common.ResourceType) { - //nolint - destinationRule := obj.Spec - - clusterId := dh.ClusterID - - syncNamespace := common.GetSyncNamespace() - - r := dh.RemoteRegistry - - dependentClusters := r.AdmiralCache.CnameDependentClusterCache.Get(destinationRule.Host).Copy() + var ( + //nolint + destinationRule = obj.Spec + clusterId = dh.ClusterID + syncNamespace = common.GetSyncNamespace() + r = dh.RemoteRegistry + dependentClusters = 
r.AdmiralCache.CnameDependentClusterCache.Get(destinationRule.Host).Copy() + allDependentClusters = make(map[string]string) + ) if len(dependentClusters) > 0 { - log.Infof(LogFormat, "Event", "DestinationRule", obj.Name, clusterId, "Processing") - - allDependentClusters := make(map[string]string) - util.MapCopy(allDependentClusters, dependentClusters) - allDependentClusters[clusterId] = clusterId - for _, dependentCluster := range allDependentClusters { - rc := r.GetRemoteController(dependentCluster) - if event == common.Delete { - err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Delete(ctx, obj.Name, v12.DeleteOptions{}) if err != nil { - log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "success") + if k8sErrors.IsNotFound(err) { + log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Either DestinationRule was already deleted, or it never existed") + } else { + log.Errorf(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err) + } } else { - log.Errorf(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err) + log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Success") } - } else { - exist, _ := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Get(ctx, obj.Name, v12.GetOptions{}) - //copy destination rule only to other clusters if dependentCluster != clusterId { addUpdateDestinationRule(ctx, obj, exist, syncNamespace, rc) @@ -350,7 +352,11 @@ func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRu if event == common.Delete { err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Delete(ctx, obj.Name, v12.DeleteOptions{}) if err != nil { - log.Infof(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err) + if k8sErrors.IsNotFound(err) { + log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, 
clusterId, "Either DestinationRule was already deleted, or it never existed") + } else { + log.Errorf(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err) + } } else { log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Success") } @@ -363,24 +369,20 @@ func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRu } func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService, vh *VirtualServiceHandler, event common.Event, resourceType common.ResourceType) error { - + var ( + virtualService = obj.Spec + clusterId = vh.ClusterID + r = vh.RemoteRegistry + syncNamespace = common.GetSyncNamespace() + ) log.Infof(LogFormat, "Event", resourceType, obj.Name, vh.ClusterID, "Received event") - //nolint - virtualService := obj.Spec - - clusterId := vh.ClusterID - - r := vh.RemoteRegistry - - syncNamespace := common.GetSyncNamespace() - if len(virtualService.Hosts) > 1 { log.Errorf(LogFormat, "Event", resourceType, obj.Name, clusterId, "Skipping as multiple hosts not supported for virtual service namespace="+obj.Namespace) return nil } - //check if this virtual service is used by Argo rollouts for canary strategy, if so, update the corresponding SE with appropriate weights + // check if this virtual service is used by Argo rollouts for canary strategy, if so, update the corresponding SE with appropriate weights if common.GetAdmiralParams().ArgoRolloutsEnabled { rollouts, err := vh.RemoteRegistry.GetRemoteController(clusterId).RolloutController.RolloutClient.Rollouts(obj.Namespace).List(ctx, v12.ListOptions{}) @@ -398,28 +400,24 @@ func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService } dependentClusters := r.AdmiralCache.CnameDependentClusterCache.Get(virtualService.Hosts[0]).Copy() - if len(dependentClusters) > 0 { - for _, dependentCluster := range dependentClusters { - rc := r.GetRemoteController(dependentCluster) - if clusterId != dependentCluster { - log.Infof(LogFormat, 
"Event", "VirtualService", obj.Name, clusterId, "Processing") - if event == common.Delete { - log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Success") err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Delete(ctx, obj.Name, v12.DeleteOptions{}) if err != nil { - return err + if k8sErrors.IsNotFound(err) { + log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Either VirtualService was already deleted, or it never existed") + } else { + log.Errorf(LogErrFormat, "Delete", "VirtualService", obj.Name, clusterId, err) + } + } else { + log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Success") } - } else { - exist, _ := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, obj.Name, v12.GetOptions{}) - //change destination host for all http routes .. to same as host on the virtual service for _, httpRoute := range virtualService.Http { for _, destination := range httpRoute.Route { @@ -429,7 +427,6 @@ func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService } } } - for _, tlsRoute := range virtualService.Tls { for _, destination := range tlsRoute.Route { //get at index 0, we do not support wildcards or multiple hosts currently @@ -438,7 +435,6 @@ func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService } } } - addUpdateVirtualService(ctx, obj, exist, syncNamespace, rc) } } @@ -448,8 +444,8 @@ func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService log.Infof(LogFormat, "Event", "VirtualService", obj.Name, clusterId, "No dependent clusters found") } - //copy the VirtualService `as is` if they are not generated by Admiral (not in CnameDependentClusterCache) - log.Infof(LogFormat, "Event", "VirtualService", obj.Name, clusterId, "Replicating `as is` to all clusters") + // copy the VirtualService `as is` if they are not generated by Admiral (not in 
CnameDependentClusterCache) + log.Infof(LogFormat, "Event", "VirtualService", obj.Name, clusterId, "Replicating 'as is' to all clusters") remoteClusters := r.GetClusterIds() for _, ClusterID := range remoteClusters { if ClusterID != clusterId { @@ -457,8 +453,11 @@ func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService if event == common.Delete { err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Delete(ctx, obj.Name, v12.DeleteOptions{}) if err != nil { - log.Infof(LogErrFormat, "Delete", "VirtualService", obj.Name, clusterId, err) - return err + if k8sErrors.IsNotFound(err) { + log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Either VirtualService was already deleted, or it never existed") + } else { + log.Errorf(LogErrFormat, "Delete", "VirtualService", obj.Name, clusterId, err) + } } else { log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Success") } @@ -472,8 +471,11 @@ func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService } func addUpdateVirtualService(ctx context.Context, obj *v1alpha3.VirtualService, exist *v1alpha3.VirtualService, namespace string, rc *RemoteController) { - var err error - var op string + var ( + err error + op string + ) + if obj.Annotations == nil { obj.Annotations = map[string]string{} } @@ -500,9 +502,12 @@ func addUpdateVirtualService(ctx context.Context, obj *v1alpha3.VirtualService, } func addUpdateServiceEntry(ctx context.Context, obj *v1alpha3.ServiceEntry, exist *v1alpha3.ServiceEntry, namespace string, rc *RemoteController) { - var err error - var op, diff string - var skipUpdate bool + var ( + err error + op, diff string + skipUpdate bool + ) + if obj.Annotations == nil { obj.Annotations = map[string]string{} } @@ -539,20 +544,20 @@ func addUpdateServiceEntry(ctx context.Context, obj *v1alpha3.ServiceEntry, exis } } -func skipDestructiveUpdate(rc *RemoteController, new 
*v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (skipDestructive bool, diff string) { - skipDestructive = false - destructive, diff := getServiceEntryDiff(new, old) +func skipDestructiveUpdate(rc *RemoteController, new *v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (bool, string) { + var ( + skipDestructive = false + destructive, diff = getServiceEntryDiff(new, old) + ) //do not update SEs during bootup phase if they are destructive if time.Since(rc.StartTime) < (2*common.GetAdmiralParams().CacheRefreshDuration) && destructive { skipDestructive = true } - return skipDestructive, diff } //Diffs only endpoints func getServiceEntryDiff(new *v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (destructive bool, diff string) { - //we diff only if both objects exist if old == nil || new == nil { return false, "" @@ -597,7 +602,11 @@ func deleteServiceEntry(ctx context.Context, exist *v1alpha3.ServiceEntry, names if exist != nil { err := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Delete(ctx, exist.Name, v12.DeleteOptions{}) if err != nil { - log.Errorf(LogErrFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, err) + if k8sErrors.IsNotFound(err) { + log.Infof(LogFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, "Either ServiceEntry was already deleted, or it never existed") + } else { + log.Errorf(LogErrFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, err) + } } else { log.Infof(LogFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, "Success") } @@ -636,7 +645,11 @@ func deleteDestinationRule(ctx context.Context, exist *v1alpha3.DestinationRule, if exist != nil { err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Delete(ctx, exist.Name, v12.DeleteOptions{}) if err != nil { - log.Errorf(LogErrFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, err) + if k8sErrors.IsNotFound(err) { + log.Infof(LogFormat, "Delete", "DestinationRule", 
exist.Name, rc.ClusterID, "Either DestinationRule was already deleted, or it never existed") + } else { + log.Errorf(LogErrFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, err) + } } else { log.Infof(LogFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, "Success") } @@ -659,13 +672,10 @@ func createDestinationRuleSkeletion(dr v1alpha32.DestinationRule, name string, n } func getServiceForDeployment(rc *RemoteController, deployment *k8sAppsV1.Deployment) *k8sV1.Service { - if deployment == nil { return nil } - cachedServices := rc.ServiceController.Cache.Get(deployment.Namespace) - if cachedServices == nil { return nil } @@ -686,11 +696,9 @@ func getServiceForDeployment(rc *RemoteController, deployment *k8sAppsV1.Deploym func getDependentClusters(dependents map[string]string, identityClusterCache *common.MapOfMaps, sourceServices map[string]*k8sV1.Service) map[string]string { var dependentClusters = make(map[string]string) - if dependents == nil { return dependentClusters } - for depIdentity := range dependents { clusters := identityClusterCache.Get(depIdentity) if clusters == nil { @@ -707,9 +715,11 @@ func getDependentClusters(dependents map[string]string, identityClusterCache *co } func copyEndpoint(e *v1alpha32.WorkloadEntry) *v1alpha32.WorkloadEntry { - labels := make(map[string]string) + var ( + labels = make(map[string]string) + ports = make(map[string]uint32) + ) util.MapCopy(labels, e.Labels) - ports := make(map[string]uint32) util.MapCopy(ports, e.Ports) return &v1alpha32.WorkloadEntry{Address: e.Address, Ports: ports, Locality: e.Locality, Labels: labels} } @@ -718,35 +728,31 @@ func copyEndpoint(e *v1alpha32.WorkloadEntry) *v1alpha32.WorkloadEntry { // 1. Canary strategy - which can use a virtual service to manage the weights associated with a stable and canary service. Admiral created endpoints in service entries will use the weights assigned in the Virtual Service // 2. 
Blue green strategy- this contains 2 service instances in a namespace, an active service and a preview service. Admiral will use repective service to create active and preview endpoints func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *argo.Rollout) map[string]*WeightedService { - if rollout == nil { return nil } cachedServices := rc.ServiceController.Cache.Get(rollout.Namespace) - if cachedServices == nil { return nil } rolloutStrategy := rollout.Spec.Strategy - if rolloutStrategy.BlueGreen == nil && rolloutStrategy.Canary == nil { return nil } - - var canaryService, stableService, virtualServiceRouteName string - - var istioCanaryWeights = make(map[string]int32) - - var blueGreenActiveService string - var blueGreenPreviewService string - - var matchedServices = make(map[string]*WeightedService) + var ( + canaryService string + stableService string + virtualServiceRouteName string + istioCanaryWeights = make(map[string]int32) + blueGreenActiveService string + blueGreenPreviewService string + matchedServices = make(map[string]*WeightedService) + ) if rolloutStrategy.BlueGreen != nil { // If rollout uses blue green strategy blueGreenActiveService = rolloutStrategy.BlueGreen.ActiveService blueGreenPreviewService = rolloutStrategy.BlueGreen.PreviewService - if len(blueGreenActiveService) == 0 { //pick a service that ends in RolloutActiveServiceSuffix if one is available blueGreenActiveService = GetServiceWithSuffixMatch(common.RolloutActiveServiceSuffix, cachedServices) @@ -757,7 +763,6 @@ func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *ar //calculate canary weights if canary strategy is using Istio traffic management if len(stableService) > 0 && len(canaryService) > 0 && rolloutStrategy.Canary.TrafficRouting != nil && rolloutStrategy.Canary.TrafficRouting.Istio != nil { - //pick stable service if specified if len(stableService) > 0 { istioCanaryWeights[stableService] = 1 @@ -835,7 +840,6 @@ func 
getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *ar log.Infof("Skipping service=%s for rollout=%s in namespace=%s and cluster=%s", service.Name, rollout.Name, rollout.Namespace, rc.ClusterID) continue } - match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector) //make sure the service matches the rollout Selector and also has a mesh port in the port spec if match { @@ -857,7 +861,6 @@ func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *ar log.Infof("Skipping service=%s for rollout=%s in namespace=%s and cluster=%s", service.Name, rollout.Name, rollout.Namespace, rc.ClusterID) continue } - match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector) //make sure the service matches the rollout Selector and also has a mesh port in the port spec if match { diff --git a/admiral/pkg/clusters/serviceentry.go b/admiral/pkg/clusters/serviceentry.go index 327e8394..6f99b46c 100644 --- a/admiral/pkg/clusters/serviceentry.go +++ b/admiral/pkg/clusters/serviceentry.go @@ -35,7 +35,7 @@ type SeDrTuple struct { DestinationRule *networking.DestinationRule } -func createServiceEntry(ctx context.Context, event admiral.EventType, rc *RemoteController, admiralCache *AdmiralCache, +func createServiceEntryForDeployment(ctx context.Context, event admiral.EventType, rc *RemoteController, admiralCache *AdmiralCache, meshPorts map[string]uint32, destDeployment *k8sAppsV1.Deployment, serviceEntries map[string]*networking.ServiceEntry) *networking.ServiceEntry { workloadIdentityKey := common.GetWorkloadIdentifier() @@ -50,13 +50,12 @@ func createServiceEntry(ctx context.Context, event admiral.EventType, rc *Remote } san := getSanForDeployment(destDeployment, workloadIdentityKey) - - tmpSe := generateServiceEntry(event, admiralCache, meshPorts, globalFqdn, rc, serviceEntries, address, san) - return tmpSe + return generateServiceEntry(event, admiralCache, meshPorts, globalFqdn, rc, serviceEntries, address, 
san) } -func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.EventType, env string, sourceIdentity string, remoteRegistry *RemoteRegistry) map[string]*networking.ServiceEntry { - +func modifyServiceEntryForNewServiceOrPod( + ctx context.Context, event admiral.EventType, env string, + sourceIdentity string, remoteRegistry *RemoteRegistry) map[string]*networking.ServiceEntry { defer util.LogElapsedTime("modifyServiceEntryForNewServiceOrPod", sourceIdentity, env, "")() if CurrentAdmiralState.ReadOnly { @@ -68,47 +67,38 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve log.Infof(LogFormat, event, env, sourceIdentity, "", "Processing skipped during cache warm up state") return nil } - //create a service entry, destination rule and virtual service in the local cluster - sourceServices := make(map[string]*k8sV1.Service) - sourceWeightedServices := make(map[string]map[string]*WeightedService) - sourceDeployments := make(map[string]*k8sAppsV1.Deployment) - sourceRollouts := make(map[string]*argo.Rollout) - - var serviceEntries = make(map[string]*networking.ServiceEntry) - - var cname string - cnames := make(map[string]string) - var serviceInstance *k8sV1.Service - var weightedServices map[string]*WeightedService - var rollout *argo.Rollout - var deployment *k8sAppsV1.Deployment - var gtps = make(map[string][]*v1.GlobalTrafficPolicy) - - var namespace string - - var gtpKey = common.ConstructGtpKey(env, sourceIdentity) - start := time.Now() - - clusters := remoteRegistry.GetClusterIds() + var ( + cname string + namespace string + serviceInstance *k8sV1.Service + rollout *argo.Rollout + deployment *k8sAppsV1.Deployment + start = time.Now() + gtpKey = common.ConstructGtpKey(env, sourceIdentity) + clusters = remoteRegistry.GetClusterIds() + gtps = make(map[string][]*v1.GlobalTrafficPolicy) + weightedServices = make(map[string]*WeightedService) + cnames = make(map[string]string) + sourceServices = 
make(map[string]*k8sV1.Service) + sourceWeightedServices = make(map[string]map[string]*WeightedService) + sourceDeployments = make(map[string]*k8sAppsV1.Deployment) + sourceRollouts = make(map[string]*argo.Rollout) + serviceEntries = make(map[string]*networking.ServiceEntry) + ) for _, clusterId := range clusters { - rc := remoteRegistry.GetRemoteController(clusterId) - if rc == nil { log.Warnf(LogFormat, "Find", "remote-controller", clusterId, clusterId, "remote controller not available/initialized for the cluster") continue } - if rc.DeploymentController != nil { deployment = rc.DeploymentController.Cache.Get(sourceIdentity, env) } - if rc.RolloutController != nil { rollout = rc.RolloutController.Cache.Get(sourceIdentity, env) } - if deployment != nil { remoteRegistry.AdmiralCache.IdentityClusterCache.Put(sourceIdentity, rc.ClusterID, rc.ClusterID) serviceInstance = getServiceForDeployment(rc, deployment) @@ -120,11 +110,10 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve cname = common.GetCname(deployment, common.GetWorkloadIdentifier(), common.GetHostnameSuffix()) sourceDeployments[rc.ClusterID] = deployment - createServiceEntry(ctx, event, rc, remoteRegistry.AdmiralCache, localMeshPorts, deployment, serviceEntries) + createServiceEntryForDeployment(ctx, event, rc, remoteRegistry.AdmiralCache, localMeshPorts, deployment, serviceEntries) } else if rollout != nil { remoteRegistry.AdmiralCache.IdentityClusterCache.Put(sourceIdentity, rc.ClusterID, rc.ClusterID) weightedServices = getServiceForRollout(ctx, rc, rollout) - if len(weightedServices) == 0 { continue } @@ -141,7 +130,6 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve cnames[cname] = "1" sourceRollouts[rc.ClusterID] = rollout createServiceEntryForRollout(ctx, event, rc, remoteRegistry.AdmiralCache, localMeshPorts, rollout, serviceEntries) - } else { continue } @@ -195,8 +183,8 @@ func modifyServiceEntryForNewServiceOrPod(ctx 
context.Context, event admiral.Eve for key, serviceEntry := range serviceEntries { if len(serviceEntry.Endpoints) == 0 { - AddServiceEntriesWithDr(ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, - + AddServiceEntriesWithDr( + ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: serviceEntry}) } clusterIngress, _ := rc.ServiceController.Cache.GetLoadBalancer(common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem) @@ -207,9 +195,8 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve if blueGreenStrategy { oldPorts := ep.Ports updateEndpointsForBlueGreen(sourceRollouts[sourceCluster], sourceWeightedServices[sourceCluster], cnames, ep, sourceCluster, key) - - AddServiceEntriesWithDr(ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, - + AddServiceEntriesWithDr( + ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: serviceEntry}) //swap it back to use for next iteration ep.Address = clusterIngress @@ -219,17 +206,17 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve //add one endpoint per each service, may be modify var se = copyServiceEntry(serviceEntry) updateEndpointsForWeightedServices(se, sourceWeightedServices[sourceCluster], clusterIngress, meshPorts) - AddServiceEntriesWithDr(ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, - + AddServiceEntriesWithDr( + ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: se}) } else { ep.Address = localFqdn oldPorts := ep.Ports ep.Ports = meshPorts - AddServiceEntriesWithDr(ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, - + AddServiceEntriesWithDr( + ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster}, map[string]*networking.ServiceEntry{key: serviceEntry}) - //swap it 
back to use for next iteration + // swap it back to use for next iteration ep.Address = clusterIngress ep.Ports = oldPorts } @@ -244,15 +231,12 @@ func modifyServiceEntryForNewServiceOrPod(ctx context.Context, event admiral.Eve for _, val := range dependents { remoteRegistry.AdmiralCache.DependencyNamespaceCache.Put(val, serviceInstance.Namespace, localFqdn, cnames) } - } util.LogElapsedTimeSince("WriteServiceEntryToSourceClusters", sourceIdentity, env, "", start) //Write to dependent clusters - start = time.Now() - dependentClusters := getDependentClusters(dependents, remoteRegistry.AdmiralCache.IdentityClusterCache, sourceServices) //update cname dependent cluster cache @@ -421,7 +405,6 @@ func modifySidecarForLocalClusterCommunication(ctx context.Context, sidecarNames func addUpdateSidecar(ctx context.Context, obj *v1alpha3.Sidecar, exist *v1alpha3.Sidecar, namespace string, rc *RemoteController) { var err error _, err = rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Update(ctx, obj, v12.UpdateOptions{}) - if err != nil { log.Infof(LogErrFormat, "Update", "Sidecar", obj.Name, rc.ClusterID, err) } else { @@ -439,9 +422,7 @@ func copySidecar(sidecar *v1alpha3.Sidecar) *v1alpha3.Sidecar { //AddServiceEntriesWithDr will create the default service entries and also additional ones specified in GTP func AddServiceEntriesWithDr(ctx context.Context, rr *RemoteRegistry, sourceClusters map[string]string, serviceEntries map[string]*networking.ServiceEntry) { - cache := rr.AdmiralCache - syncNamespace := common.GetSyncNamespace() for _, se := range serviceEntries { @@ -489,7 +470,6 @@ func AddServiceEntriesWithDr(ctx context.Context, rr *RemoteRegistry, sourceClus } else { //nolint newServiceEntry := createServiceEntrySkeletion(*seDr.ServiceEntry, seDr.SeName, syncNamespace) - if newServiceEntry != nil { newServiceEntry.Labels = map[string]string{common.GetWorkloadIdentifier(): fmt.Sprintf("%v", identityId)} addUpdateServiceEntry(ctx, 
newServiceEntry, oldServiceEntry, syncNamespace, rc) diff --git a/admiral/pkg/clusters/serviceentry_test.go b/admiral/pkg/clusters/serviceentry_test.go index 7558f3c6..74456d3b 100644 --- a/admiral/pkg/clusters/serviceentry_test.go +++ b/admiral/pkg/clusters/serviceentry_test.go @@ -804,8 +804,7 @@ func TestCreateServiceEntry(t *testing.T) { //Run the test for every provided case for _, c := range deploymentSeCreationTestCases { t.Run(c.name, func(t *testing.T) { - var createdSE *istionetworkingv1alpha3.ServiceEntry - createdSE = createServiceEntry(ctx, c.action, c.rc, &c.admiralCache, c.meshPorts, &c.deployment, c.serviceEntries) + createdSE := createServiceEntryForDeployment(ctx, c.action, c.rc, &c.admiralCache, c.meshPorts, &c.deployment, c.serviceEntries) if !reflect.DeepEqual(createdSE, c.expectedResult) { t.Errorf("Test %s failed, expected: %v got %v", c.name, c.expectedResult, createdSE) } @@ -855,13 +854,12 @@ func TestCreateServiceEntry(t *testing.T) { } func TestCreateServiceEntryForNewServiceOrPodRolloutsUsecase(t *testing.T) { - - const NAMESPACE = "test-test" - const SERVICENAME = "serviceNameActive" - const ROLLOUT_POD_HASH_LABEL string = "rollouts-pod-template-hash" - + const ( + namespace = "test-test" + serviceName = "serviceNameActive" + rolloutPodHashLabel string = "rollouts-pod-template-hash" + ) ctx := context.Background() - p := common.AdmiralParams{ KubeconfigPath: "testdata/fake.config", } @@ -937,7 +935,7 @@ func TestCreateServiceEntryForNewServiceOrPodRolloutsUsecase(t *testing.T) { }, } - rollout.Namespace = NAMESPACE + rollout.Namespace = namespace rollout.Spec.Strategy = argo.RolloutStrategy{ Canary: &argo.CanaryStrategy{}, } @@ -956,15 +954,15 @@ func TestCreateServiceEntryForNewServiceOrPodRolloutsUsecase(t *testing.T) { selectorMap := make(map[string]string) selectorMap["app"] = "test" - selectorMap[ROLLOUT_POD_HASH_LABEL] = "hash" + selectorMap[rolloutPodHashLabel] = "hash" activeService := &coreV1.Service{ Spec: coreV1.ServiceSpec{ 
Selector: selectorMap, }, } - activeService.Name = SERVICENAME - activeService.Namespace = NAMESPACE + activeService.Name = serviceName + activeService.Namespace = namespace port1 := coreV1.ServicePort{ Port: 8080, Name: "random1", From 2065e57bde7da51df00842912266e15d6dd31c04 Mon Sep 17 00:00:00 2001 From: Anubhav Aeron Date: Fri, 26 Aug 2022 12:14:33 -0700 Subject: [PATCH 2/3] skip lint check on copying virtual service object Signed-off-by: Anubhav Aeron --- admiral/pkg/clusters/handler.go | 1 + admiral/pkg/clusters/handler_test.go | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go index 36e37f23..91a09d1b 100644 --- a/admiral/pkg/clusters/handler.go +++ b/admiral/pkg/clusters/handler.go @@ -370,6 +370,7 @@ func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRu func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService, vh *VirtualServiceHandler, event common.Event, resourceType common.ResourceType) error { var ( + //nolint virtualService = obj.Spec clusterId = vh.ClusterID r = vh.RemoteRegistry diff --git a/admiral/pkg/clusters/handler_test.go b/admiral/pkg/clusters/handler_test.go index 60999a8e..31044d1d 100644 --- a/admiral/pkg/clusters/handler_test.go +++ b/admiral/pkg/clusters/handler_test.go @@ -29,7 +29,6 @@ import ( ) func TestGetDependentClusters(t *testing.T) { - identityClusterCache := common.NewMapOfMaps() identityClusterCache.Put("id1", "dep1", "cl1") identityClusterCache.Put("id2", "dep2", "cl2") @@ -102,7 +101,6 @@ func TestGetDependentClusters(t *testing.T) { } func TestIgnoreIstioResource(t *testing.T) { - //Struct of test case info. Name is required. 
testCases := []struct { name string From d63b6e57ff0cb54006598e4fc4bb5e9f1b690f3c Mon Sep 17 00:00:00 2001 From: Anubhav Aeron Date: Fri, 26 Aug 2022 13:25:52 -0700 Subject: [PATCH 3/3] add test case for non-existent vs Signed-off-by: Anubhav Aeron --- admiral/pkg/clusters/handler.go | 4 +- admiral/pkg/clusters/handler_test.go | 449 +++++++++++++++------------ 2 files changed, 247 insertions(+), 206 deletions(-) diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go index 91a09d1b..1d160728 100644 --- a/admiral/pkg/clusters/handler.go +++ b/admiral/pkg/clusters/handler.go @@ -368,7 +368,9 @@ func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRu } } -func handleVirtualServiceEvent(ctx context.Context, obj *v1alpha3.VirtualService, vh *VirtualServiceHandler, event common.Event, resourceType common.ResourceType) error { +func handleVirtualServiceEvent( + ctx context.Context, obj *v1alpha3.VirtualService, vh *VirtualServiceHandler, + event common.Event, resourceType common.ResourceType) error { var ( //nolint virtualService = obj.Spec diff --git a/admiral/pkg/clusters/handler_test.go b/admiral/pkg/clusters/handler_test.go index 31044d1d..431e957e 100644 --- a/admiral/pkg/clusters/handler_test.go +++ b/admiral/pkg/clusters/handler_test.go @@ -7,24 +7,23 @@ import ( "testing" "time" - argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/wrappers" - "github.com/google/go-cmp/cmp" "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio" "github.com/istio-ecosystem/admiral/admiral/pkg/test" + + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/golang/protobuf/ptypes/duration" + 
"github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "google.golang.org/protobuf/testing/protocmp" "istio.io/api/networking/v1alpha3" v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3" - istiofake "istio.io/client-go/pkg/clientset/versioned/fake" + istioFake "istio.io/client-go/pkg/clientset/versioned/fake" coreV1 "k8s.io/api/core/v1" - k8sV1 "k8s.io/api/core/v1" - k8sv1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" ) @@ -38,7 +37,7 @@ func TestGetDependentClusters(t *testing.T) { name string dependents map[string]string identityClusterCache *common.MapOfMaps - sourceServices map[string]*k8sV1.Service + sourceServices map[string]*coreV1.Service expectedResult map[string]string }{ { @@ -66,8 +65,8 @@ func TestGetDependentClusters(t *testing.T) { "id1": "val1", }, identityClusterCache: identityClusterCache, - sourceServices: map[string]*k8sV1.Service{ - "cl1": &k8sV1.Service{}, + sourceServices: map[string]*coreV1.Service{ + "cl1": &coreV1.Service{}, }, expectedResult: map[string]string{}, }, @@ -77,9 +76,9 @@ func TestGetDependentClusters(t *testing.T) { "id1": "val1", }, identityClusterCache: identityClusterCache, - sourceServices: map[string]*k8sV1.Service{ - "cl99": &k8sV1.Service{ - ObjectMeta: v12.ObjectMeta{ + sourceServices: map[string]*coreV1.Service{ + "cl99": &coreV1.Service{ + ObjectMeta: metaV1.ObjectMeta{ Name: "testservice", }, }, @@ -499,43 +498,63 @@ func TestGetOutlierDetection(t *testing.T) { } func TestHandleVirtualServiceEvent(t *testing.T) { - tooManyHosts := v1alpha32.VirtualService{ - Spec: v1alpha3.VirtualService{ - Hosts: []string{"qa.blah.global", "e2e.blah.global"}, - }, - } - tooManyHosts.Namespace = "other-ns" - - happyPath := v1alpha32.VirtualService{ - Spec: v1alpha3.VirtualService{ - Hosts: []string{"e2e.blah.global"}, - }, - } - happyPath.Namespace = 
"other-ns" - happyPath.Name = "vs-name" - - vsNotGeneratedByAdmiral := v1alpha32.VirtualService{ - Spec: v1alpha3.VirtualService{ - Hosts: []string{"e2e.blah.something"}, - }, - } - vsNotGeneratedByAdmiral.Namespace = "other-ns" - vsNotGeneratedByAdmiral.Name = "vs-name-other-nss" + var ( + ctx = context.Background() + cnameCache = common.NewMapOfMaps() + goodCnameCache = common.NewMapOfMaps() + rr = NewRemoteRegistry(context.TODO(), common.AdmiralParams{}) + rr1 = NewRemoteRegistry(context.TODO(), common.AdmiralParams{}) + rr2 = NewRemoteRegistry(context.TODO(), common.AdmiralParams{}) + fakeIstioClient = istioFake.NewSimpleClientset() + fullFakeIstioClient = istioFake.NewSimpleClientset() + + tooManyHosts = v1alpha32.VirtualService{ + Spec: v1alpha3.VirtualService{ + Hosts: []string{"qa.blah.global", "e2e.blah.global"}, + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "too-many-hosts", + Namespace: "other-ns", + }, + } + happyPath = v1alpha32.VirtualService{ + Spec: v1alpha3.VirtualService{ + Hosts: []string{"e2e.blah.global"}, + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "vs-name", + Namespace: "other-ns", + }, + } + nonExistentVs = v1alpha32.VirtualService{ + Spec: v1alpha3.VirtualService{ + Hosts: []string{"does-not-exist.com"}, + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "does-not-exist", + Namespace: "other-ns", + }, + } + vsNotGeneratedByAdmiral = v1alpha32.VirtualService{ + Spec: v1alpha3.VirtualService{ + Hosts: []string{"e2e.blah.something"}, + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "vs-name-other-nss", + Namespace: "other-ns", + }, + } + ) - cnameCache := common.NewMapOfMaps() - rr := NewRemoteRegistry(nil, common.AdmiralParams{}) rr.AdmiralCache = &AdmiralCache{ CnameDependentClusterCache: cnameCache, SeClusterCache: common.NewMapOfMaps(), } - noDependencClustersHandler := VirtualServiceHandler{ + noDependentClustersHandler := VirtualServiceHandler{ RemoteRegistry: rr, } - fakeIstioClient := istiofake.NewSimpleClientset() - goodCnameCache := 
common.NewMapOfMaps() goodCnameCache.Put("e2e.blah.global", "cluster.k8s.global", "cluster.k8s.global") - rr1 := NewRemoteRegistry(nil, common.AdmiralParams{}) rr1.AdmiralCache = &AdmiralCache{ CnameDependentClusterCache: goodCnameCache, SeClusterCache: common.NewMapOfMaps(), @@ -548,17 +567,14 @@ func TestHandleVirtualServiceEvent(t *testing.T) { handlerEmptyClient := VirtualServiceHandler{ RemoteRegistry: rr1, } - ctx := context.Background() - fullFakeIstioClient := istiofake.NewSimpleClientset() fullFakeIstioClient.NetworkingV1alpha3().VirtualServices("ns").Create(ctx, &v1alpha32.VirtualService{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metaV1.ObjectMeta{ Name: "vs-name", }, Spec: v1alpha3.VirtualService{ Hosts: []string{"e2e.blah.global"}, }, - }, v12.CreateOptions{}) - rr2 := NewRemoteRegistry(nil, common.AdmiralParams{}) + }, metaV1.CreateOptions{}) rr2.AdmiralCache = &AdmiralCache{ CnameDependentClusterCache: goodCnameCache, SeClusterCache: common.NewMapOfMaps(), @@ -585,14 +601,14 @@ func TestHandleVirtualServiceEvent(t *testing.T) { name: "Virtual Service with multiple hosts", vs: &tooManyHosts, expectedError: nil, - handler: &noDependencClustersHandler, + handler: &noDependentClustersHandler, event: 0, }, { name: "No dependent clusters", vs: &happyPath, expectedError: nil, - handler: &noDependencClustersHandler, + handler: &noDependentClustersHandler, event: 0, }, { @@ -631,18 +647,24 @@ func TestHandleVirtualServiceEvent(t *testing.T) { event: 1, }, { - name: "Deleted Virtual Service", + name: "Deleted existing Virtual Service, should not return an error", vs: &happyPath, expectedError: nil, handler: &handlerFullClient, event: 2, }, + { + name: "Deleting virtual service which does not exist, should not return an error", + vs: &nonExistentVs, + expectedError: nil, + handler: &handlerFullClient, + event: 2, + }, } //Run the test for every provided case for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - err := 
handleVirtualServiceEvent(ctx, c.vs, c.handler, c.event, common.VirtualService) if err != c.expectedError { t.Fatalf("Error mismatch, expected %v but got %v", c.expectedError, err) @@ -653,46 +675,50 @@ func TestHandleVirtualServiceEvent(t *testing.T) { func TestGetServiceForRolloutCanary(t *testing.T) { //Struct of test case info. Name is required. - const Namespace = "namespace" - const ServiceName = "serviceName" - const StableServiceName = "stableserviceName" - const CanaryServiceName = "canaryserviceName" - const GeneratedStableServiceName = "hello-" + common.RolloutStableServiceSuffix - const LatestMatchingService = "hello-root-service" - const VS_NAME_1 = "virtualservice1" - const VS_NAME_2 = "virtualservice2" - const VS_NAME_3 = "virtualservice3" - const VS_NAME_4 = "virtualservice4" - const VS_ROUTE_PRIMARY = "primary" - config := rest.Config{ - Host: "localhost", - } - stop := make(chan struct{}) - - s, e := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300)) - r, e := admiral.NewRolloutsController("test", stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300)) - - fakeIstioClient := istiofake.NewSimpleClientset() - + const ( + Namespace = "namespace" + ServiceName = "serviceName" + StableServiceName = "stableserviceName" + CanaryServiceName = "canaryserviceName" + GeneratedStableServiceName = "hello-" + common.RolloutStableServiceSuffix + LatestMatchingService = "hello-root-service" + vsName1 = "virtualservice1" + vsName2 = "virtualservice2" + vsName3 = "virtualservice3" + vsName4 = "virtualservice4" + vsRoutePrimary = "primary" + ) + var ( + config = rest.Config{ + Host: "localhost", + } + stop = make(chan struct{}) + fakeIstioClient = istioFake.NewSimpleClientset() + selectorMap = map[string]string{ + "app": "test", + } + ports = []coreV1.ServicePort{{Port: 8080}, {Port: 8081}} + ) + s, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, 
time.Second*time.Duration(300)) + if err != nil { + t.Fatalf("failed to initialize service controller, err: %v", err) + } + r, err := admiral.NewRolloutsController("test", stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300)) + if err != nil { + t.Fatalf("failed to initialize rollout controller, err: %v", err) + } v := &istio.VirtualServiceController{ IstioClient: fakeIstioClient, } - if e != nil { - t.Fatalf("Inititalization failed") - } - rcTemp := &RemoteController{ VirtualServiceController: v, ServiceController: s, - RolloutController: r} - - selectorMap := make(map[string]string) - selectorMap["app"] = "test" - ports := []coreV1.ServicePort{{Port: 8080}, {Port: 8081}} + RolloutController: r, + } service := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: ServiceName, Namespace: Namespace, CreationTimestamp: v12.NewTime(time.Now())}, + ObjectMeta: metaV1.ObjectMeta{Name: ServiceName, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now())}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: ports, @@ -701,7 +727,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { // namespace1 Services service1 := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: "dummy1", Namespace: "namespace1", CreationTimestamp: v12.NewTime(time.Now())}, + ObjectMeta: metaV1.ObjectMeta{Name: "dummy1", Namespace: "namespace1", CreationTimestamp: metaV1.NewTime(time.Now())}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: []coreV1.ServicePort{{ @@ -717,7 +743,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { // namespace4 Services service3 := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: "dummy3", Namespace: "namespace4", CreationTimestamp: v12.NewTime(time.Now())}, + ObjectMeta: metaV1.ObjectMeta{Name: "dummy3", Namespace: "namespace4", CreationTimestamp: metaV1.NewTime(time.Now())}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: []coreV1.ServicePort{{ @@ -732,7 +758,7 @@ func TestGetServiceForRolloutCanary(t 
*testing.T) { } service4 := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: "dummy4", Namespace: "namespace4", CreationTimestamp: v12.NewTime(time.Now())}, + ObjectMeta: metaV1.ObjectMeta{Name: "dummy4", Namespace: "namespace4", CreationTimestamp: metaV1.NewTime(time.Now())}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: []coreV1.ServicePort{{ @@ -745,7 +771,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { // namespace Services stableService := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: StableServiceName, Namespace: Namespace, CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-15)))}, + ObjectMeta: metaV1.ObjectMeta{Name: StableServiceName, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now().Add(time.Duration(-15)))}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: ports, @@ -753,7 +779,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { } generatedStableService := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: GeneratedStableServiceName, Namespace: Namespace, CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-15)))}, + ObjectMeta: metaV1.ObjectMeta{Name: GeneratedStableServiceName, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now().Add(time.Duration(-15)))}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: ports, @@ -761,7 +787,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { } canaryService := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: CanaryServiceName, Namespace: Namespace, CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-15)))}, + ObjectMeta: metaV1.ObjectMeta{Name: CanaryServiceName, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now().Add(time.Duration(-15)))}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: ports, @@ -769,7 +795,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { } latestMatchingService := &coreV1.Service{ - ObjectMeta: v12.ObjectMeta{Name: LatestMatchingService, Namespace: 
Namespace, CreationTimestamp: v12.NewTime(time.Now())}, + ObjectMeta: metaV1.ObjectMeta{Name: LatestMatchingService, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now())}, Spec: coreV1.ServiceSpec{ Selector: selectorMap, Ports: ports, @@ -786,7 +812,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { rcTemp.ServiceController.Cache.Put(latestMatchingService) virtualService := &v1alpha32.VirtualService{ - ObjectMeta: v12.ObjectMeta{Name: VS_NAME_1, Namespace: Namespace}, + ObjectMeta: metaV1.ObjectMeta{Name: vsName1, Namespace: Namespace}, Spec: v1alpha3.VirtualService{ Http: []*v1alpha3.HTTPRoute{{Route: []*v1alpha3.HTTPRouteDestination{ {Destination: &v1alpha3.Destination{Host: StableServiceName}, Weight: 80}, @@ -796,9 +822,9 @@ func TestGetServiceForRolloutCanary(t *testing.T) { } vsMutipleRoutesWithMatch := &v1alpha32.VirtualService{ - ObjectMeta: v12.ObjectMeta{Name: VS_NAME_2, Namespace: Namespace}, + ObjectMeta: metaV1.ObjectMeta{Name: vsName2, Namespace: Namespace}, Spec: v1alpha3.VirtualService{ - Http: []*v1alpha3.HTTPRoute{{Name: VS_ROUTE_PRIMARY, Route: []*v1alpha3.HTTPRouteDestination{ + Http: []*v1alpha3.HTTPRoute{{Name: vsRoutePrimary, Route: []*v1alpha3.HTTPRouteDestination{ {Destination: &v1alpha3.Destination{Host: StableServiceName}, Weight: 80}, {Destination: &v1alpha3.Destination{Host: CanaryServiceName}, Weight: 20}, }}}, @@ -806,7 +832,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { } vsMutipleRoutesWithZeroWeight := &v1alpha32.VirtualService{ - ObjectMeta: v12.ObjectMeta{Name: VS_NAME_4, Namespace: Namespace}, + ObjectMeta: metaV1.ObjectMeta{Name: vsName4, Namespace: Namespace}, Spec: v1alpha3.VirtualService{ Http: []*v1alpha3.HTTPRoute{{Name: "random", Route: []*v1alpha3.HTTPRouteDestination{ {Destination: &v1alpha3.Destination{Host: StableServiceName}, Weight: 100}, @@ -815,18 +841,18 @@ func TestGetServiceForRolloutCanary(t *testing.T) { }, } ctx := context.Background() - 
rcTemp.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(Namespace).Create(ctx, virtualService, v12.CreateOptions{}) - rcTemp.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(Namespace).Create(ctx, vsMutipleRoutesWithMatch, v12.CreateOptions{}) - rcTemp.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(Namespace).Create(ctx, vsMutipleRoutesWithZeroWeight, v12.CreateOptions{}) + rcTemp.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(Namespace).Create(ctx, virtualService, metaV1.CreateOptions{}) + rcTemp.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(Namespace).Create(ctx, vsMutipleRoutesWithMatch, metaV1.CreateOptions{}) + rcTemp.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(Namespace).Create(ctx, vsMutipleRoutesWithZeroWeight, metaV1.CreateOptions{}) canaryRollout := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} matchLabel := make(map[string]string) matchLabel["app"] = "test" - labelSelector := v12.LabelSelector{ + labelSelector := metaV1.LabelSelector{ MatchLabels: matchLabel, } canaryRollout.Spec.Selector = &labelSelector @@ -838,12 +864,12 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutNS1 := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} matchLabel2 := make(map[string]string) matchLabel2["app"] = "test1" - labelSelector2 := v12.LabelSelector{ + labelSelector2 := metaV1.LabelSelector{ MatchLabels: matchLabel2, } canaryRolloutNS1.Spec.Selector = &labelSelector2 @@ -855,11 +881,11 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutNS4 := argo.Rollout{ Spec: 
argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: "8080"}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: "8080"}}, }}} matchLabel4 := make(map[string]string) matchLabel4["app"] = "test" - labelSelector4 := v12.LabelSelector{ + labelSelector4 := metaV1.LabelSelector{ MatchLabels: matchLabel4, } canaryRolloutNS4.Spec.Selector = &labelSelector4 @@ -870,7 +896,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutIstioVs := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} canaryRolloutIstioVs.Spec.Selector = &labelSelector @@ -881,7 +907,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { CanaryService: CanaryServiceName, TrafficRouting: &argo.RolloutTrafficRouting{ Istio: &argo.IstioTrafficRouting{ - VirtualService: &argo.IstioVirtualService{Name: VS_NAME_1}, + VirtualService: &argo.IstioVirtualService{Name: vsName1}, }, }, }, @@ -889,7 +915,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutIstioVsRouteMatch := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} canaryRolloutIstioVsRouteMatch.Spec.Selector = &labelSelector @@ -900,7 +926,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { CanaryService: CanaryServiceName, TrafficRouting: &argo.RolloutTrafficRouting{ Istio: &argo.IstioTrafficRouting{ - VirtualService: &argo.IstioVirtualService{Name: VS_NAME_2, Routes: []string{VS_ROUTE_PRIMARY}}, + VirtualService: &argo.IstioVirtualService{Name: vsName2, Routes: []string{vsRoutePrimary}}, }, }, }, @@ -908,7 +934,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { 
canaryRolloutIstioVsRouteMisMatch := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} canaryRolloutIstioVsRouteMisMatch.Spec.Selector = &labelSelector @@ -919,7 +945,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { CanaryService: CanaryServiceName, TrafficRouting: &argo.RolloutTrafficRouting{ Istio: &argo.IstioTrafficRouting{ - VirtualService: &argo.IstioVirtualService{Name: VS_NAME_2, Routes: []string{"random"}}, + VirtualService: &argo.IstioVirtualService{Name: vsName2, Routes: []string{"random"}}, }, }, }, @@ -927,7 +953,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutIstioVsZeroWeight := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} canaryRolloutIstioVsZeroWeight.Spec.Selector = &labelSelector @@ -938,7 +964,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { CanaryService: CanaryServiceName, TrafficRouting: &argo.RolloutTrafficRouting{ Istio: &argo.IstioTrafficRouting{ - VirtualService: &argo.IstioVirtualService{Name: VS_NAME_4}, + VirtualService: &argo.IstioVirtualService{Name: vsName4}, }, }, }, @@ -946,7 +972,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutWithStableService := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} canaryRolloutWithStableService.Spec.Selector = &labelSelector @@ -960,7 +986,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutIstioVsMimatch := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + 
ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} canaryRolloutIstioVsMimatch.Spec.Selector = &labelSelector @@ -979,7 +1005,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { canaryRolloutWithStableServiceNS4 := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: "8080"}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: "8080"}}, }}} canaryRolloutWithStableServiceNS4.Spec.Selector = &labelSelector @@ -1070,7 +1096,7 @@ func TestGetServiceForRolloutCanary(t *testing.T) { t.Run(c.name, func(t *testing.T) { result := getServiceForRollout(ctx, c.rc, c.rollout) if len(c.result) == 0 { - if result != nil && len(result) != 0 { + if len(result) != 0 { t.Fatalf("Service expected to be nil") } } else { @@ -1093,73 +1119,88 @@ func TestGetServiceForRolloutCanary(t *testing.T) { func TestGetServiceForRolloutBlueGreen(t *testing.T) { //Struct of test case info. Name is required. 
- const NAMESPACE = "namespace" - const SERVICENAME = "serviceNameActive" - const GeneratedActiveServiceName = "hello-" + common.RolloutActiveServiceSuffix - const ROLLOUT_POD_HASH_LABEL string = "rollouts-pod-template-hash" - - config := rest.Config{ - Host: "localhost", + const ( + namespace = "namespace" + serviceName = "serviceNameActive" + generatedActiveServiceName = "hello-" + common.RolloutActiveServiceSuffix + rolloutPodHashLabel string = "rollouts-pod-template-hash" + ) + var ( + stop = make(chan struct{}) + config = rest.Config{ + Host: "localhost", + } + matchLabel = map[string]string{ + "app": "test", + } + labelSelector = metaV1.LabelSelector{ + MatchLabels: matchLabel, + } + bgRollout = argo.Rollout{ + Spec: argo.RolloutSpec{ + Selector: &labelSelector, + Strategy: argo.RolloutStrategy{ + BlueGreen: &argo.BlueGreenStrategy{ + ActiveService: serviceName, + PreviewService: "previewService", + }, + }, + Template: coreV1.PodTemplateSpec{ + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, + }, + }, + ObjectMeta: metaV1.ObjectMeta{ + Namespace: namespace, + }, + } + bgRolloutNoActiveService = argo.Rollout{ + Spec: argo.RolloutSpec{ + Selector: &labelSelector, + Strategy: argo.RolloutStrategy{ + BlueGreen: &argo.BlueGreenStrategy{}, + }, + Template: coreV1.PodTemplateSpec{ + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, + }, + }, + ObjectMeta: metaV1.ObjectMeta{ + Namespace: namespace, + }, + } + selectorMap = map[string]string{ + "app": "test", + rolloutPodHashLabel: "hash", + } + activeService = &coreV1.Service{ + Spec: coreV1.ServiceSpec{ + Selector: selectorMap, + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: serviceName, + Namespace: namespace, + }, + } + ) + s, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300)) + if err != nil { + t.Fatalf("failed to initialize service controller, err: %v", err) + } + r, err := admiral.NewRolloutsController("test", 
stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300)) + if err != nil { + t.Fatalf("failed to initialize rollout controller, err: %v", err) } - stop := make(chan struct{}) - - s, e := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300)) - r, e := admiral.NewRolloutsController("test", stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300)) - - emptyCacheService, e := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300)) - if e != nil { - t.Fatalf("Inititalization failed") + emptyCacheService, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300)) + if err != nil { + t.Fatalf("failed to initialize empty service controller, err: %v", err) } rc := &RemoteController{ VirtualServiceController: &istio.VirtualServiceController{}, ServiceController: s, - RolloutController: r} - - bgRollout := argo.Rollout{ - Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, - }}} - - matchLabel := make(map[string]string) - matchLabel["app"] = "test" - - labelSelector := v12.LabelSelector{ - MatchLabels: matchLabel, - } - bgRollout.Spec.Selector = &labelSelector - - bgRollout.Namespace = NAMESPACE - bgRollout.Spec.Strategy = argo.RolloutStrategy{ - BlueGreen: &argo.BlueGreenStrategy{ - ActiveService: SERVICENAME, - PreviewService: "previewService", - }, - } - bgRolloutNoActiveService := argo.Rollout{ - Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, - }}} - - bgRolloutNoActiveService.Spec.Selector = &labelSelector - - bgRolloutNoActiveService.Namespace = NAMESPACE - bgRolloutNoActiveService.Spec.Strategy = argo.RolloutStrategy{ - BlueGreen: &argo.BlueGreenStrategy{}, + RolloutController: r, } - selectorMap := 
make(map[string]string) - selectorMap["app"] = "test" - selectorMap[ROLLOUT_POD_HASH_LABEL] = "hash" - - activeService := &coreV1.Service{ - Spec: coreV1.ServiceSpec{ - Selector: selectorMap, - }, - } - activeService.Name = SERVICENAME - activeService.Namespace = NAMESPACE port1 := coreV1.ServicePort{ Port: 8080, Name: "random1", @@ -1178,8 +1219,8 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { Selector: selectorMap, }, } - generatedActiveService.Name = GeneratedActiveServiceName - generatedActiveService.Namespace = NAMESPACE + generatedActiveService.Name = generatedActiveServiceName + generatedActiveService.Namespace = namespace generatedActiveService.Spec.Ports = ports selectorMap1 := make(map[string]string) @@ -1191,7 +1232,7 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { }, } service1.Name = "dummy" - service1.Namespace = NAMESPACE + service1.Namespace = namespace port3 := coreV1.ServicePort{ Port: 8080, Name: "random3", @@ -1207,14 +1248,14 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { selectorMap2 := make(map[string]string) selectorMap2["app"] = "test" - selectorMap2[ROLLOUT_POD_HASH_LABEL] = "hash" + selectorMap2[rolloutPodHashLabel] = "hash" previewService := &coreV1.Service{ Spec: coreV1.ServiceSpec{ Selector: selectorMap, }, } previewService.Name = "previewService" - previewService.Namespace = NAMESPACE + previewService.Namespace = namespace port5 := coreV1.ServicePort{ Port: 8080, Name: "random3", @@ -1257,21 +1298,21 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { noStratergyRollout := argo.Rollout{ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} - noStratergyRollout.Namespace = NAMESPACE + noStratergyRollout.Namespace = namespace noStratergyRollout.Spec.Strategy = argo.RolloutStrategy{} bgRolloutNs1 := argo.Rollout{ Spec: argo.RolloutSpec{Template: 
coreV1.PodTemplateSpec{ - ObjectMeta: k8sv1.ObjectMeta{Annotations: map[string]string{}}, + ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}}, }}} matchLabel1 := make(map[string]string) matchLabel1["app"] = "test" - labelSelector1 := v12.LabelSelector{ + labelSelector1 := metaV1.LabelSelector{ MatchLabels: matchLabel, } bgRolloutNs1.Spec.Selector = &labelSelector1 @@ -1279,13 +1320,13 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { bgRolloutNs1.Namespace = "namespace1" bgRolloutNs1.Spec.Strategy = argo.RolloutStrategy{ BlueGreen: &argo.BlueGreenStrategy{ - ActiveService: SERVICENAME, + ActiveService: serviceName, PreviewService: "previewService", }, } - resultForBlueGreen := map[string]*WeightedService{SERVICENAME: {Weight: 1, Service: activeService}} - resultForNoActiveService := map[string]*WeightedService{GeneratedActiveServiceName: {Weight: 1, Service: generatedActiveService}} + resultForBlueGreen := map[string]*WeightedService{serviceName: {Weight: 1, Service: activeService}} + resultForNoActiveService := map[string]*WeightedService{generatedActiveServiceName: {Weight: 1, Service: generatedActiveService}} testCases := []struct { name string @@ -1337,7 +1378,7 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { t.Run(c.name, func(t *testing.T) { result := getServiceForRollout(ctx, c.rc, c.rollout) if len(c.result) == 0 { - if result != nil && len(result) > 0 { + if len(result) > 0 { t.Fatalf("Service expected to be nil") } } else { @@ -1356,7 +1397,6 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) { } func TestSkipDestructiveUpdate(t *testing.T) { - twoEndpointSe := v1alpha3.ServiceEntry{ Hosts: []string{"e2e.my-first-service.mesh"}, Addresses: []string{"240.10.1.1"}, @@ -1399,31 +1439,31 @@ func TestSkipDestructiveUpdate(t *testing.T) { } newSeTwoEndpoints := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se1", Namespace: "random"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"}, //nolint 
Spec: twoEndpointSe, } newSeTwoEndpointsUpdated := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se1", Namespace: "random"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"}, //nolint Spec: twoEndpointSeUpdated, } newSeOneEndpoint := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se1", Namespace: "random"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"}, //nolint Spec: oneEndpointSe, } oldSeTwoEndpoints := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se1", Namespace: "random"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"}, //nolint Spec: twoEndpointSe, } oldSeOneEndpoint := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se1", Namespace: "random"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"}, //nolint Spec: oneEndpointSe, } @@ -1514,14 +1554,13 @@ func TestSkipDestructiveUpdate(t *testing.T) { } func TestAddUpdateServiceEntry(t *testing.T) { - - ctx := context.Background() - - fakeIstioClient := istiofake.NewSimpleClientset() - - seCtrl := &istio.ServiceEntryController{ - IstioClient: fakeIstioClient, - } + var ( + ctx = context.Background() + fakeIstioClient = istioFake.NewSimpleClientset() + seCtrl = &istio.ServiceEntryController{ + IstioClient: fakeIstioClient, + } + ) twoEndpointSe := v1alpha3.ServiceEntry{ Hosts: []string{"e2e.my-first-service.mesh"}, @@ -1551,18 +1590,18 @@ func TestAddUpdateServiceEntry(t *testing.T) { } newSeOneEndpoint := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se1", Namespace: "namespace"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "namespace"}, //nolint Spec: oneEndpointSe, } oldSeTwoEndpoints := &v1alpha32.ServiceEntry{ - ObjectMeta: v12.ObjectMeta{Name: "se2", Namespace: "namespace"}, + ObjectMeta: metaV1.ObjectMeta{Name: "se2", Namespace: "namespace"}, //nolint Spec: twoEndpointSe, } - _, err := 
seCtrl.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Create(ctx, oldSeTwoEndpoints, v12.CreateOptions{}) + _, err := seCtrl.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Create(ctx, oldSeTwoEndpoints, metaV1.CreateOptions{}) if err != nil { t.Error(err) } @@ -1572,7 +1611,7 @@ func TestAddUpdateServiceEntry(t *testing.T) { StartTime: time.Now(), } - rcNotinWarmupPhase := &RemoteController{ + rcNotInWarmupPhase := &RemoteController{ ServiceEntryController: seCtrl, StartTime: time.Now().Add(time.Duration(-21) * time.Minute), } @@ -1601,7 +1640,7 @@ func TestAddUpdateServiceEntry(t *testing.T) { }, { name: "Should update an SE", - rc: rcNotinWarmupPhase, + rc: rcNotInWarmupPhase, newSe: newSeOneEndpoint, oldSe: oldSeTwoEndpoints, skipDestructive: false, @@ -1614,7 +1653,7 @@ func TestAddUpdateServiceEntry(t *testing.T) { addUpdateServiceEntry(ctx, c.newSe, c.oldSe, "namespace", c.rc) if c.skipDestructive { //verify the update did not go through - se, err := c.rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Get(ctx, c.oldSe.Name, v12.GetOptions{}) + se, err := c.rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Get(ctx, c.oldSe.Name, metaV1.GetOptions{}) if err != nil { t.Error(err) }