From ed01dd701d77898668cc183add1b16ecee81162e Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Mon, 7 Nov 2022 16:55:45 -0800 Subject: [PATCH 1/7] Support SameLabels as peer Namespace selection in ACNP Signed-off-by: Dyanngg --- .../antrea/crds/clusternetworkpolicy.yaml | 8 + build/yamls/antrea-aks.yml | 8 + build/yamls/antrea-crds.yml | 8 + build/yamls/antrea-eks.yml | 8 + build/yamls/antrea-gke.yml | 8 + build/yamls/antrea-ipsec.yml | 8 + build/yamls/antrea.yml | 8 + pkg/apis/crd/v1beta1/types.go | 6 + pkg/apis/crd/v1beta1/zz_generated.deepcopy.go | 7 +- pkg/apiserver/openapi/zz_generated.openapi.go | 23 +- .../networkpolicy/clusternetworkpolicy.go | 196 +++++++++++++++--- .../clusternetworkpolicy_test.go | 168 ++++++++++++++- 12 files changed, 418 insertions(+), 38 deletions(-) diff --git a/build/charts/antrea/crds/clusternetworkpolicy.yaml b/build/charts/antrea/crds/clusternetworkpolicy.yaml index 44236e82f32..6027b25f055 100644 --- a/build/charts/antrea/crds/clusternetworkpolicy.yaml +++ b/build/charts/antrea/crds/clusternetworkpolicy.yaml @@ -344,6 +344,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -605,6 +609,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 0995e0d35af..1cd6039781e 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -910,6 +910,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1171,6 +1175,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-crds.yml b/build/yamls/antrea-crds.yml index 584c14ad030..d0289ba5f1b 100644 --- a/build/yamls/antrea-crds.yml +++ b/build/yamls/antrea-crds.yml @@ -903,6 +903,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1164,6 +1168,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index 5d15e3393cc..b96ce87679b 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -910,6 +910,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1171,6 +1175,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index 5aadc9f617f..8af4f5b6291 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -910,6 +910,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1171,6 +1175,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index 928683a4a9a..a3e4884dcb9 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -910,6 +910,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1171,6 +1175,10 @@ spec: enum: - Self type: string + sameLabels: + type: array 
+ items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index e921a844e6e..ba6a4c22c51 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -910,6 +910,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1171,6 +1175,10 @@ spec: enum: - Self type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/pkg/apis/crd/v1beta1/types.go b/pkg/apis/crd/v1beta1/types.go index 46c1280d883..cdee4779428 100644 --- a/pkg/apis/crd/v1beta1/types.go +++ b/pkg/apis/crd/v1beta1/types.go @@ -626,8 +626,14 @@ type AppliedTo struct { NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` } +// PeerNamespaces describes criteria for selecting Pod/ExternalEntity +// from matched Namespaces. Only one of the criteria can be set. type PeerNamespaces struct { + // Selects from the same Namespace of the appliedTo workloads. Match NamespaceMatchType `json:"match,omitempty"` + // Selects Namespaces that share the same values for the given set of label keys + // with the appliedTo Namespace. Namespaces must have all the label keys. + SameLabels []string `json:"sameLabels,omitempty"` } // NamespaceMatchType describes Namespace matching strategy. diff --git a/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go b/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go index 7cde65971e8..219658c1ed8 100644 --- a/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/crd/v1beta1/zz_generated.deepcopy.go @@ -1153,7 +1153,7 @@ func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { if in.Namespaces != nil { in, out := &in.Namespaces, &out.Namespaces *out = new(PeerNamespaces) - **out = **in + (*in).DeepCopyInto(*out) } if in.ExternalEntitySelector != nil { in, out := &in.ExternalEntitySelector, &out.ExternalEntitySelector @@ -1400,6 +1400,11 @@ func (in *Packet) DeepCopy() *Packet { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PeerNamespaces) DeepCopyInto(out *PeerNamespaces) { *out = *in + if in.SameLabels != nil { + in, out := &in.SameLabels, &out.SameLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/pkg/apiserver/openapi/zz_generated.openapi.go b/pkg/apiserver/openapi/zz_generated.openapi.go index 22779c68457..9ba7fb82404 100644 --- a/pkg/apiserver/openapi/zz_generated.openapi.go +++ b/pkg/apiserver/openapi/zz_generated.openapi.go @@ -5221,12 +5221,29 @@ func schema_pkg_apis_crd_v1beta1_PeerNamespaces(ref common.ReferenceCallback) co return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "PeerNamespaces describes criteria for selecting Pod/ExternalEntity from matched Namespaces. Only one of the criteria can be set.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ "match": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Description: "Selects from the same Namespace of the appliedTo workloads.", + Type: []string{"string"}, + Format: "", + }, + }, + "sameLabels": { + SchemaProps: spec.SchemaProps{ + Description: "Selects Namespaces that share the same values for the given set of label keys with the appliedTo Namespace. 
Namespaces must have all the label keys.",
+							Type: []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type:    []string{"string"},
+										Format:  "",
+									},
+								},
+							},
					},
				},
			},
diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go
index f711232bfa0..8bb78d97d5e 100644
--- a/pkg/controller/networkpolicy/clusternetworkpolicy.go
+++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go
@@ -16,6 +16,7 @@ package networkpolicy

 import (
 	"reflect"
+	"strings"

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,6 +34,11 @@ import (
 	utilsets "antrea.io/antrea/pkg/util/sets"
 )

+const (
+	labelValueUndefined = "Undefined"
+	labelValueSeparator = ","
+)
+
 func getACNPReference(cnp *crdv1beta1.ClusterNetworkPolicy) *controlplane.NetworkPolicyReference {
 	return &controlplane.NetworkPolicyReference{
 		Type: controlplane.AntreaClusterNetworkPolicy,
@@ -336,9 +342,10 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl
 	addressGroups := map[string]*antreatypes.AddressGroup{}
 	// If appliedTo is set at spec level and the ACNP has per-namespace rules, then each appliedTo needs
 	// to be split into appliedToGroups for each of its affected Namespace.
-	var clusterAppliedToAffectedNS []string
-	// atgForNamespace is the appliedToGroups split by Namespaces.
-	var atgForNamespace []*antreatypes.AppliedToGroup
+	atgPerAffectedNS := map[string]*antreatypes.AppliedToGroup{}
+	// When appliedTo is set at spec level and the ACNP has rules that select peer Namespaces by sameLabels,
+	// this field tracks the labels of all Namespaces selected by the appliedTo.
+	affectedNSAndLabels := map[string]map[string]string{}
 	// clusterSetScopeSelectorKeys keeps track of all the ClusterSet-scoped selector keys of the policy.
 	// During policy peer processing, any ClusterSet-scoped selector will be registered with the
 	// labelIdentityInterface and added to this set.
By the end of the function, this set will @@ -349,15 +356,14 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl if at.ServiceAccount != nil { atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil, nil) appliedToGroups = mergeAppliedToGroups(appliedToGroups, atg) - clusterAppliedToAffectedNS = append(clusterAppliedToAffectedNS, at.ServiceAccount.Namespace) - atgForNamespace = append(atgForNamespace, atg) + atgPerAffectedNS[at.ServiceAccount.Namespace] = atg + affectedNSAndLabels[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) } else { - affectedNS := n.getAffectedNamespacesForAppliedTo(at) - for _, ns := range affectedNS { + affectedNSAndLabels = n.getAffectedNamespacesForAppliedTo(at) + for ns := range affectedNSAndLabels { atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) appliedToGroups = mergeAppliedToGroups(appliedToGroups, atg) - clusterAppliedToAffectedNS = append(clusterAppliedToAffectedNS, ns) - atgForNamespace = append(atgForNamespace, atg) + atgPerAffectedNS[ns] = atg } } } @@ -366,7 +372,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl processRules := func(cnpRules []crdv1beta1.Rule, direction controlplane.Direction) { for idx, cnpRule := range cnpRules { services, namedPortExists := toAntreaServicesForCRD(cnpRule.Ports, cnpRule.Protocols) - clusterPeers, perNSPeers := splitPeersByScope(cnpRule, direction) + clusterPeers, perNSPeers, nsLabelPeers := splitPeersByScope(cnpRule, direction) addRule := func(peer *controlplane.NetworkPolicyPeer, ruleAddressGroups []*antreatypes.AddressGroup, dir controlplane.Direction, ruleAppliedTos []*antreatypes.AppliedToGroup) { rule := controlplane.NetworkPolicyRule{ Direction: dir, @@ -390,7 +396,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } // When a rule's NetworkPolicyPeer is empty, a cluster level rule should be created // with an Antrea peer matching all addresses. - if len(clusterPeers) > 0 || len(perNSPeers) == 0 { + if len(clusterPeers) > 0 || len(perNSPeers)+len(nsLabelPeers) == 0 { ruleAppliedTos := cnpRule.AppliedTo // For ACNPs that have per-namespace rules, cluster-level rules will be created with appliedTo // set as the spec appliedTo for each rule. 
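For reference, here is a minimal, hypothetical ClusterNetworkPolicy (not part of this patch) that exercises the sameLabels peers (nsLabelPeers) handled in the next hunk. The Namespaces selected by the spec appliedTo are grouped by their value for the "tier" label key, and workloads in each group are allowed ingress from Namespaces carrying the same tier value:

apiVersion: crd.antrea.io/v1beta1
kind: ClusterNetworkPolicy
metadata:
  name: ingress-same-tier-only
spec:
  priority: 5
  appliedTo:
    - namespaceSelector:
        matchLabels:
          purpose: test
  ingress:
    - action: Allow
      from:
        - namespaces:
            sameLabels:
              - tier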
@@ -412,11 +418,11 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl if len(perNSPeers) > 0 { if len(cnp.Spec.AppliedTo) > 0 { // Create a rule for each affected Namespace of appliedTo at spec level - for i := range clusterAppliedToAffectedNS { - klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", clusterAppliedToAffectedNS[i], idx, cnp.Name) - peer, ags, selKeys := n.toNamespacedPeerForCRD(perNSPeers, cnp, clusterAppliedToAffectedNS[i]) + for ns, atg := range atgPerAffectedNS { + klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name) + peer, ags, selKeys := n.toNamespacedPeerForCRD(perNSPeers, cnp, ns) clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) - addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atgForNamespace[i]}) + addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atg}) } } else { // Create a rule for each affected Namespace of appliedTo at rule level @@ -429,7 +435,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl addRule(peer, ags, direction, []*antreatypes.AppliedToGroup{atg}) } else { affectedNS := n.getAffectedNamespacesForAppliedTo(at) - for _, ns := range affectedNS { + for ns := range affectedNS { atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) klog.V(4).Infof("Adding a new per-namespace rule with appliedTo %v for rule %d of %s", atg, idx, cnp.Name) peer, ags, selKeys := n.toNamespacedPeerForCRD(perNSPeers, cnp, ns) @@ -440,6 +446,43 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } } } + if len(nsLabelPeers) > 0 { + if len(cnp.Spec.AppliedTo) > 0 { + // All affected Namespaces and their labels are already stored in affectedNSAndLabels + for _, peer := range nsLabelPeers { + nsGroupByLabelVal := groupNamespacesByLabelValue(affectedNSAndLabels, peer.Namespaces.SameLabels) + for labelValues, groupedNamespaces := range nsGroupByLabelVal { + peer, atgs, ags, selKeys := n.toAntreaPeerForSameLabelNamespaces(peer, cnp, atgPerAffectedNS, labelValues, groupedNamespaces) + clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) + addRule(peer, ags, direction, atgs) + } + } + } else { + atgPerRuleAffectedNS := map[string]*antreatypes.AppliedToGroup{} + ruleAffectedNSLabels := map[string]map[string]string{} + for _, at := range cnpRule.AppliedTo { + if at.ServiceAccount != nil { + atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil, nil) + atgPerRuleAffectedNS[at.ServiceAccount.Namespace] = atg + ruleAffectedNSLabels[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) + } else { + ruleAffectedNSLabels = n.getAffectedNamespacesForAppliedTo(at) + for ns := range ruleAffectedNSLabels { + atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) + atgPerRuleAffectedNS[ns] = atg + } + } + } + for _, peer := range nsLabelPeers { + nsGroupByLabelVal := groupNamespacesByLabelValue(ruleAffectedNSLabels, peer.Namespaces.SameLabels) + for labelValues, groupedNamespaces := range nsGroupByLabelVal { + peer, atgs, ags, selKeys := n.toAntreaPeerForSameLabelNamespaces(peer, cnp, atgPerRuleAffectedNS, labelValues, groupedNamespaces) + clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) + addRule(peer, ags, direction, atgs) + } + } + } + } } } // Compute 
NetworkPolicyRules for Ingress Rules.
@@ -484,14 +527,14 @@ func serviceAccountNameToPodSelector(saName string) *metav1.LabelSelector {
 func hasPerNamespaceRule(cnp *crdv1beta1.ClusterNetworkPolicy) bool {
 	for _, ingress := range cnp.Spec.Ingress {
 		for _, peer := range ingress.From {
-			if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf {
+			if peer.Namespaces != nil {
 				return true
 			}
 		}
 	}
 	for _, egress := range cnp.Spec.Egress {
 		for _, peer := range egress.To {
-			if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf {
+			if peer.Namespaces != nil {
 				return true
 			}
 		}
@@ -499,6 +542,97 @@ func hasPerNamespaceRule(cnp *crdv1beta1.ClusterNetworkPolicy) bool {
 	return false
 }

+func (n *NetworkPolicyController) getNamespaceLabels(ns string) map[string]string {
+	namespace, err := n.namespaceLister.Get(ns)
+	if err != nil {
+		// The Namespace does not exist; it has no labels to match against.
+		return nil
+	}
+	return namespace.Labels
+}
+
+// groupNamespacesByLabelValue groups Namespaces if they have the same label value for all the
+// label keys listed. If a Namespace is missing at least one of the label keys, it will
+// not be grouped. Example:
+//
+// ns1: app=web, tier=test, tenant=t1
+// ns2: app=web, tier=test, tenant=t2
+// ns3: app=web, tier=production, tenant=t1
+// ns4: app=web, tier=production, tenant=t2
+// ns5: app=db, tenant=t1
+// labelKeys = [app, tier]
+// Result after grouping:
+// "web,test,": [ns1, ns2]
+// "web,production,": [ns3, ns4]
+func groupNamespacesByLabelValue(affectedNSAndLabels map[string]map[string]string, labelKeys []string) map[string][]string {
+	nsGroupedByLabelVal := map[string][]string{}
+	for ns, nsLabels := range affectedNSAndLabels {
+		if groupKey := getLabelValues(nsLabels, labelKeys); groupKey != labelValueUndefined {
+			nsGroupedByLabelVal[groupKey] = append(nsGroupedByLabelVal[groupKey], ns)
+		}
+	}
+	return nsGroupedByLabelVal
+}
+
+func getLabelValues(labels map[string]string, labelKeys []string) string {
+	key := ""
+	for _, k := range labelKeys {
+		v, ok := labels[k]
+		if !ok {
+			return labelValueUndefined
+		}
+		key += v + labelValueSeparator
+	}
+	return key
+}
+
+// labelKeyValPairsToSelector creates a LabelSelector based on a list of label keys
+// and their expected values.
+func labelKeyValPairsToSelector(labelKeys []string, labelValues string) *metav1.LabelSelector {
+	labelValuesSep := strings.Split(labelValues, labelValueSeparator)
+	labelMatchCriteria := map[string]string{}
+	for i := range labelKeys {
+		labelMatchCriteria[labelKeys[i]] = labelValuesSep[i]
+	}
+	return &metav1.LabelSelector{
+		MatchLabels: labelMatchCriteria,
+	}
+}
+
+// toAntreaPeerForSameLabelNamespaces computes the appliedToGroups and addressGroups for each
+// group of Namespaces that have the same values for the sameLabels keys.
+func (n *NetworkPolicyController) toAntreaPeerForSameLabelNamespaces(peer crdv1beta1.NetworkPolicyPeer,
+	np metav1.Object, atgPerAffectedNS map[string]*antreatypes.AppliedToGroup,
+	labelValues string,
+	namespacesByLabelValues []string) (*controlplane.NetworkPolicyPeer, []*antreatypes.AppliedToGroup, []*antreatypes.AddressGroup, sets.Set[string]) {
+	labelKeys := peer.Namespaces.SameLabels
+	var labelIdentities []uint32
+	uniqueLabelIDs := map[uint32]struct{}{}
+	clusterSetScopeSelectorKeys := sets.New[string]()
+	// select Namespaces that, for specific label keys, have the same values as the appliedTo Namespaces.
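+	// For example (illustrative values): with sameLabels [app, tier] and labelValues "web,prod,",
+	// the selector below is equivalent to matchLabels {app: web, tier: prod}.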
+	nsSelForSameLabels := labelKeyValPairsToSelector(labelKeys, labelValues)
+	addressGroups := []*antreatypes.AddressGroup{n.createAddressGroup("", peer.PodSelector, nsSelForSameLabels, peer.ExternalEntitySelector, nil)}
+	if n.stretchNPEnabled && peer.Scope == crdv1beta1.ScopeClusterSet {
+		newClusterSetScopeSelector := antreatypes.NewGroupSelector("", peer.PodSelector, nsSelForSameLabels, peer.ExternalEntitySelector, nil)
+		clusterSetScopeSelectorKeys.Insert(newClusterSetScopeSelector.NormalizedName)
+		// In addition to getting the matched Label Identity IDs, AddSelector also registers the selector
+		// with the labelIdentityInterface.
+		matchedLabelIDs := n.labelIdentityInterface.AddSelector(newClusterSetScopeSelector, internalNetworkPolicyKeyFunc(np))
+		for _, id := range matchedLabelIDs {
+			uniqueLabelIDs[id] = struct{}{}
+		}
+	}
+	for id := range uniqueLabelIDs {
+		labelIdentities = append(labelIdentities, id)
+	}
+	antreaPeer := &controlplane.NetworkPolicyPeer{
+		AddressGroups:   getAddressGroupNames(addressGroups),
+		LabelIdentities: labelIdentities,
+	}
+	var atgs []*antreatypes.AppliedToGroup
+	for _, ns := range namespacesByLabelValues {
+		atgForNamespace := atgPerAffectedNS[ns]
+		atgs = append(atgs, atgForNamespace)
+	}
+	return antreaPeer, atgs, addressGroups, clusterSetScopeSelectorKeys
+}
+
 // processClusterAppliedTo processes appliedTo groups in Antrea ClusterNetworkPolicy set
 // at cluster level (appliedTo groups which will not need to be split by Namespaces).
 func (n *NetworkPolicyController) processClusterAppliedTo(appliedTo []crdv1beta1.AppliedTo) []*antreatypes.AppliedToGroup {
@@ -525,32 +659,36 @@ func (n *NetworkPolicyController) processClusterAppliedTo(appliedTo []crdv1beta1

-// splitPeersByScope splits the ClusterNetworkPolicy peers in the rule by whether the peer
-// is cluster-scoped or per-namespace.
+// splitPeersByScope splits the ClusterNetworkPolicy peers in the rule into three groups: peers
+// that are cluster-scoped, peers that are matched per namespace, and peers that select
+// Namespaces by sameLabels.
-func splitPeersByScope(rule crdv1beta1.Rule, dir controlplane.Direction) ([]crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer) {
+func splitPeersByScope(rule crdv1beta1.Rule, dir controlplane.Direction) ([]crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer) {
-	var clusterPeers, perNSPeers []crdv1beta1.NetworkPolicyPeer
+	var clusterPeers, perNSPeers, nsLabelPeers []crdv1beta1.NetworkPolicyPeer
 	peers := rule.From
 	if dir == controlplane.DirectionOut {
 		peers = rule.To
 	}
 	for _, peer := range peers {
-		if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf {
-			perNSPeers = append(perNSPeers, peer)
+		if peer.Namespaces != nil {
+			if peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf {
+				perNSPeers = append(perNSPeers, peer)
+			} else if len(peer.Namespaces.SameLabels) > 0 {
+				nsLabelPeers = append(nsLabelPeers, peer)
+			}
 		} else {
 			clusterPeers = append(clusterPeers, peer)
 		}
 	}
-	return clusterPeers, perNSPeers
+	return clusterPeers, perNSPeers, nsLabelPeers
 }

 // getAffectedNamespacesForAppliedTo computes the Namespaces currently affected by the appliedTo
-// Namespace selectors.
+// Namespace selectors, and returns these Namespaces along with their labels.
+func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo crdv1beta1.AppliedTo) map[string]map[string]string {
+	affectedNSAndLabels := map[string]map[string]string{}
 	nsLabelSelector := appliedTo.NamespaceSelector
 	if appliedTo.Group != "" {
 		cg, err := n.cgLister.Get(appliedTo.Group)
 		if err != nil {
-			return affectedNS
+			return affectedNSAndLabels
 		}
 		if cg.Spec.NamespaceSelector != nil || cg.Spec.PodSelector != nil {
 			nsLabelSelector = cg.Spec.NamespaceSelector
@@ -563,9 +701,9 @@ func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo cr
 	}
 	namespaces, _ := n.namespaceLister.List(nsSel)
 	for _, ns := range namespaces {
-		affectedNS = append(affectedNS, ns.Name)
+		affectedNSAndLabels[ns.Name] = ns.Labels
 	}
-	return affectedNS
+	return affectedNSAndLabels
 }

 // processInternalGroupForRule examines the internal group (and its childGroups if applicable)
diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go
index 3562c861196..036df95358d 100644
--- a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go
+++ b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go
@@ -17,6 +17,8 @@ package networkpolicy
 import (
 	"fmt"
 	"net"
+	"reflect"
+	"sort"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -33,6 +35,48 @@ import (
 	"antrea.io/antrea/pkg/util/k8s"
 )

+// ruleSemanticallyEqual compares two NetworkPolicyRule objects. It disregards
+// the appliedToGroup slice element order as long as the two rules' appliedToGroups
+// have the same elements.
+func ruleSemanticallyEqual(a, b controlplane.NetworkPolicyRule) bool {
+	// Sort copies of the appliedToGroups so that the callers' rules are not mutated.
+	a.AppliedToGroups = append([]string(nil), a.AppliedToGroups...)
+	b.AppliedToGroups = append([]string(nil), b.AppliedToGroups...)
+	sort.Strings(a.AppliedToGroups)
+	sort.Strings(b.AppliedToGroups)
+	return reflect.DeepEqual(a, b)
+}
+
+// diffNetworkPolicyRuleList checks if elements in two controlplane.NetworkPolicyRule
+// slices are equal. If not, it returns the unmatched NetworkPolicyRules.
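+// The comparison is order-insensitive: a rule in one list may match a rule at any
+// position in the other list, and every rule is matched at most once.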
+func diffNetworkPolicyRuleList(a, b []controlplane.NetworkPolicyRule) (extraA, extraB []controlplane.NetworkPolicyRule) {
+	// Mark indexes in b that have already matched
+	visited := make([]bool, len(b))
+	for i := 0; i < len(a); i++ {
+		found := false
+		for j := 0; j < len(b); j++ {
+			if visited[j] {
+				continue
+			}
+			if ruleSemanticallyEqual(a[i], b[j]) {
+				visited[j] = true
+				found = true
+				break
+			}
+		}
+		if !found {
+			extraA = append(extraA, a[i])
+		}
+	}
+	for j := 0; j < len(b); j++ {
+		if visited[j] {
+			continue
+		}
+		extraB = append(extraB, b[j])
+	}
+	return
+}
+
 func TestProcessClusterNetworkPolicy(t *testing.T) {
 	p10 := float64(10)
 	t10 := int32(10)
@@ -55,6 +99,12 @@ func TestProcessClusterNetworkPolicy(t *testing.T) {
 			Labels: map[string]string{"foo2": "bar2"},
 		},
 	}
+	nsC := v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   "nsC",
+			Labels: map[string]string{"foo2": "bar2"},
+		},
+	}

 	svcA := v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -796,6 +846,21 @@ func TestProcessClusterNetworkPolicy(t *testing.T) {
 						Priority: 0,
 						Action:   &allowAction,
 					},
+					{
+						Direction:       controlplane.DirectionIn,
+						AppliedToGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)},
+						From: controlplane.NetworkPolicyPeer{
+							AddressGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)},
+						},
+						Services: []controlplane.Service{
+							{
+								Protocol: &protocolTCP,
+								Port:     &int80,
+							},
+						},
+						Priority: 0,
+						Action:   &allowAction,
+					},
 					{
 						Direction:       controlplane.DirectionIn,
 						AppliedToGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("", nil, &metav1.LabelSelector{}, nil, nil).NormalizedName)},
@@ -815,12 +880,13 @@
 				AppliedToGroups: []string{
 					getNormalizedUID(antreatypes.NewGroupSelector("nsA", nil, nil, nil, nil).NormalizedName),
 					getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName),
+					getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName),
 					getNormalizedUID(antreatypes.NewGroupSelector("", nil, &metav1.LabelSelector{}, nil, nil).NormalizedName),
 				},
 				AppliedToPerRule: true,
 			},
-			expectedAppliedToGroups: 3,
-			expectedAddressGroups:   3,
+			expectedAppliedToGroups: 4,
+			expectedAddressGroups:   4,
 		},
 		{
 			name: "with-per-namespace-rule-applied-to-per-rule",
@@ -915,15 +981,103 @@
 						Priority: 1,
 						Action:   &dropAction,
 					},
+					{
+						Direction:       controlplane.DirectionIn,
+						AppliedToGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)},
+						From: controlplane.NetworkPolicyPeer{
+							AddressGroups: []string{getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName)},
+						},
+						Services: []controlplane.Service{
+							{
+								Protocol: &protocolTCP,
+								Port:     &int81,
+							},
+						},
+						Priority: 1,
+						Action:   &dropAction,
+					},
 				},
 				AppliedToGroups: []string{
 					getNormalizedUID(antreatypes.NewGroupSelector("nsA", &selectorA, nil, nil, nil).NormalizedName),
 					getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName),
+					getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName),
 				},
 				AppliedToPerRule: true,
 			},
-			expectedAppliedToGroups: 2,
-			expectedAddressGroups:   2,
+			expectedAppliedToGroups: 3,
+			expectedAddressGroups:   3,
+		},
+		{
+			name: "with-same-labels-namespace-rule",
+			inputPolicy:
&crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "cnpS", UID: "uidS"}, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Priority: p10, + Ingress: []crdv1beta1.Rule{ + { + Ports: []crdv1beta1.NetworkPolicyPort{ + { + Port: &int80, + }, + }, + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"foo2"}, + }, + }, + }, + Action: &allowAction, + }, + }, + }, + }, + expectedPolicy: &antreatypes.NetworkPolicy{ + UID: "uidS", + Name: "uidS", + SourceRef: &controlplane.NetworkPolicyReference{ + Type: controlplane.AntreaClusterNetworkPolicy, + Name: "cnpS", + UID: "uidS", + }, + Priority: &p10, + TierPriority: &DefaultTierPriority, + Rules: []controlplane.NetworkPolicyRule{ + { + Direction: controlplane.DirectionIn, + AppliedToGroups: []string{ + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + }, + From: controlplane.NetworkPolicyPeer{ + AddressGroups: []string{ + getNormalizedUID(antreatypes.NewGroupSelector("", nil, &selectorB, nil, nil).NormalizedName), + }, + }, + Services: []controlplane.Service{ + { + Protocol: &protocolTCP, + Port: &int80, + }, + }, + Priority: 0, + Action: &allowAction, + }, + }, + AppliedToGroups: []string{ + getNormalizedUID(antreatypes.NewGroupSelector("nsA", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), + }, + AppliedToPerRule: true, + }, + expectedAppliedToGroups: 3, + expectedAddressGroups: 1, }, { name: "rule-with-to-service", @@ -1782,6 +1936,7 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { c.cgStore.Add(&cgA) c.namespaceStore.Add(&nsA) c.namespaceStore.Add(&nsB) + c.namespaceStore.Add(&nsC) c.serviceStore.Add(&svcA) c.tierStore.Add(&tierA) actualPolicy, actualAppliedToGroups, actualAddressGroups := c.processClusterNetworkPolicy(tt.inputPolicy) @@ -1791,7 +1946,10 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { assert.Equal(t, tt.expectedPolicy.Priority, actualPolicy.Priority) assert.Equal(t, tt.expectedPolicy.TierPriority, actualPolicy.TierPriority) assert.Equal(t, tt.expectedPolicy.AppliedToPerRule, actualPolicy.AppliedToPerRule) - assert.ElementsMatch(t, tt.expectedPolicy.Rules, actualPolicy.Rules) + missingExpectedRules, extraActualRules := diffNetworkPolicyRuleList(tt.expectedPolicy.Rules, actualPolicy.Rules) + if len(missingExpectedRules) > 0 || len(extraActualRules) > 0 { + t.Errorf("Unexpected rules in processed policy. Missing expected rules: %v. 
Extra actual rules: %v", missingExpectedRules, extraActualRules) + } assert.ElementsMatch(t, tt.expectedPolicy.AppliedToGroups, actualPolicy.AppliedToGroups) assert.Equal(t, tt.expectedAppliedToGroups, len(actualAppliedToGroups)) assert.Equal(t, tt.expectedAddressGroups, len(actualAddressGroups)) From cd8768dca0a94fcdfb7f6c47f5d5be757263986e Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Thu, 5 Jan 2023 21:53:05 -0800 Subject: [PATCH 2/7] Refactor policy e2e framework for same labels tests Signed-off-by: Dyanngg --- multicluster/test/e2e/antreapolicy_test.go | 15 +- test/e2e/antreapolicy_test.go | 1177 +++++++++++--------- test/e2e/clustergroup_test.go | 14 +- test/e2e/group_test.go | 40 +- test/e2e/k8s_util.go | 60 +- test/e2e/utils/cnp_spec_builder.go | 17 +- 6 files changed, 710 insertions(+), 613 deletions(-) diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go index e35d6b8e5a0..88df87af282 100644 --- a/multicluster/test/e2e/antreapolicy_test.go +++ b/multicluster/test/e2e/antreapolicy_test.go @@ -35,7 +35,7 @@ const ( var ( allPodsPerCluster []antreae2e.Pod perNamespacePods []string - perClusterNamespaces map[string]string + perClusterNamespaces map[string]antreae2e.TestNamespaceMeta podsByNamespace map[string][]antreae2e.Pod clusterK8sUtilsMap map[string]*antreae2e.KubernetesUtils ) @@ -53,10 +53,11 @@ func failOnError(err error, t *testing.T) { // initializeForPolicyTest creates three Pods in three test Namespaces for each test cluster. func initializeForPolicyTest(t *testing.T, data *MCTestData) { perNamespacePods = []string{"a", "b", "c"} - perClusterNamespaces = make(map[string]string) - perClusterNamespaces["x"] = "x" - perClusterNamespaces["y"] = "y" - perClusterNamespaces["z"] = "z" + perClusterNamespaces = make(map[string]antreae2e.TestNamespaceMeta) + nss := []string{"x", "y", "z"} + for _, ns := range nss { + perClusterNamespaces[ns] = antreae2e.TestNamespaceMeta{Name: ns} + } allPodsPerCluster = []antreae2e.Pod{} podsByNamespace = make(map[string][]antreae2e.Pod) @@ -64,8 +65,8 @@ func initializeForPolicyTest(t *testing.T, data *MCTestData) { for _, podName := range perNamespacePods { for _, ns := range perClusterNamespaces { - allPodsPerCluster = append(allPodsPerCluster, antreae2e.NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], antreae2e.NewPod(ns, podName)) + allPodsPerCluster = append(allPodsPerCluster, antreae2e.NewPod(ns.Name, podName)) + podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], antreae2e.NewPod(ns.Name, podName)) } } for clusterName := range data.clusterTestDataMap { diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index cef8e0b4639..dd28e6cdc02 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -50,10 +50,11 @@ var ( k8sUtils *KubernetesUtils allTestList []*TestCase pods []string - namespaces map[string]string + namespaces map[string]TestNamespaceMeta podIPs map[string][]string p80, p81, p8080, p8081, p8082, p8085, p6443 int32 nodes map[string]string + selfNamespace *crdv1beta1.PeerNamespaces ) const ( @@ -65,32 +66,13 @@ const ( // Verification of deleting/creating resources timed out. 
 	timeout = 10 * time.Second
 	// audit log directory on Antrea Agent
-	logDir          = "/var/log/antrea/networkpolicy/"
-	logfileName     = "np.log"
-	defaultTierName = "application"
+	logDir           = "/var/log/antrea/networkpolicy/"
+	logfileName      = "np.log"
+	defaultTierName  = "application"
+	formFactorNormal = "3by3PodWorkloads"
+	formFactorLarge  = "extraNamespaces"
 )

-// TestAntreaPolicyStats is the top-level test which contains all subtests for
-// AntreaPolicyStats related test cases so they can share setup, teardown.
-func TestAntreaPolicyStats(t *testing.T) {
-	skipIfHasWindowsNodes(t)
-	skipIfAntreaPolicyDisabled(t)
-	skipIfNetworkPolicyStatsDisabled(t)
-
-	data, err := setupTest(t)
-	if err != nil {
-		t.Fatalf("Error when setting up test: %v", err)
-	}
-	defer teardownTest(t, data)
-
-	t.Run("testANNPNetworkPolicyStatsWithDropAction", func(t *testing.T) {
-		testANNPNetworkPolicyStatsWithDropAction(t, data)
-	})
-	t.Run("testAntreaClusterNetworkPolicyStats", func(t *testing.T) {
-		testAntreaClusterNetworkPolicyStats(t, data)
-	})
-}
-
 func failOnError(err error, t *testing.T) {
 	if err != nil {
 		log.Errorf("%+v", err)
@@ -108,19 +90,82 @@ type podToAddrTestStep struct {
 	expectedConnectivity PodConnectivityMark
 }

-func initialize(t *testing.T, data *TestData) {
+// Util function to get the runtime name of a test Namespace.
+func getNS(ns string) string {
+	return namespaces[ns].Name
+}
+
+// Util function to get the runtime Pod struct of a test Pod.
+func getPod(ns, po string) Pod {
+	return Pod(namespaces[ns].Name + "/" + po)
+}
+
+// Util function to get the runtime Pod name of a test Pod.
+func getPodName(ns, po string) string {
+	return namespaces[ns].Name + "/" + po
+}
+
+// initNamespaceMeta populates the metadata of the test Namespaces.
+// There are two form factors for test workload Namespaces:
+//
+// Normal: three Namespaces x, y, z.
+// Large:  two "prod" Namespaces labeled purpose=test and tier=prod.
+//         two "dev" Namespaces labeled purpose=test and tier=dev.
+//         one "no-tier-label" Namespace labeled purpose=test.
+//
+// The large form factor workloads are used for test cases where advanced
+// Namespace matching in policies is required.
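+//
+// For example (illustrative suffix): with random suffix "e2e01", the large form factor
+// creates Namespaces prod1-e2e01, prod2-e2e01, dev1-e2e01, dev2-e2e01 and no-tier-label-e2e01.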
+func initNamespaceMeta(formFactor string) map[string]TestNamespaceMeta { + allNamespaceMeta := make(map[string]TestNamespaceMeta) + suffix := randName("") + if formFactor == formFactorLarge { + for i := 1; i < 3; i++ { + prodNS := TestNamespaceMeta{ + Name: "prod" + strconv.Itoa(i) + "-" + suffix, + Labels: map[string]string{ + "purpose": "test", + "tier": "prod", + }, + } + allNamespaceMeta["prod"+strconv.Itoa(i)] = prodNS + devNS := TestNamespaceMeta{ + Name: "dev" + strconv.Itoa(i) + "-" + suffix, + Labels: map[string]string{ + "purpose": "test", + "tier": "dev", + }, + } + allNamespaceMeta["dev"+strconv.Itoa(i)] = devNS + } + allNamespaceMeta["no-tier-label"] = TestNamespaceMeta{ + Name: "no-tier-label-" + suffix, + Labels: map[string]string{ + "purpose": "test", + }, + } + } else if formFactor == formFactorNormal { + nss := []string{"x", "y", "z"} + for _, ns := range nss { + allNamespaceMeta[ns] = TestNamespaceMeta{ + Name: ns + "-" + suffix, + } + } + } + return allNamespaceMeta +} + +func initialize(t *testing.T, data *TestData, formFactor string) { p80 = 80 p81 = 81 p8080 = 8080 p8081 = 8081 p8082 = 8082 p8085 = 8085 + selfNamespace = &crdv1beta1.PeerNamespaces{ + Match: crdv1beta1.NamespaceMatchSelf, + } pods = []string{"a", "b", "c"} - namespaces = make(map[string]string) - suffix := randName("") - namespaces["x"] = "x-" + suffix - namespaces["y"] = "y-" + suffix - namespaces["z"] = "z-" + suffix + namespaces = initNamespaceMeta(formFactor) // This function "initialize" will be used more than once, and variable "allPods" is global. // It should be empty every time when "initialize" is performed, otherwise there will be unexpected // results. @@ -129,8 +174,8 @@ func initialize(t *testing.T, data *TestData) { for _, podName := range pods { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) + podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], NewPod(ns.Name, podName)) } } skipIfAntreaPolicyDisabled(t) @@ -148,13 +193,13 @@ func skipIfAntreaPolicyDisabled(tb testing.TB) { skipIfFeatureDisabled(tb, features.AntreaPolicy, true, true) } -func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string]string) error { +func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string]TestNamespaceMeta) error { if err := k8s.CleanNetworkPolicies(namespaces); err != nil { return err } for _, ns := range namespaces { builder := &NetworkPolicySpecBuilder{} - builder = builder.SetName(ns, "default-deny-namespace") + builder = builder.SetName(ns.Name, "default-deny-namespace") builder.SetTypeIngress() if _, err := k8s.CreateOrUpdateNetworkPolicy(builder.Get()); err != nil { return err @@ -170,7 +215,7 @@ func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces map[string return nil } -func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces map[string]string) error { +func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces map[string]TestNamespaceMeta) error { if err := k8s.CleanNetworkPolicies(namespaces); err != nil { return err } @@ -191,7 +236,6 @@ func testMutateACNPNoTier(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). 
SetPriority(10.0) acnp := builder.Get() - log.Debugf("creating ACNP %v", acnp.Name) acnp, err := k8sUtils.CreateOrUpdateACNP(acnp) if err != nil { failOnError(fmt.Errorf("ACNP create failed %v", err), t) @@ -205,11 +249,10 @@ func testMutateACNPNoTier(t *testing.T) { func testMutateANNPNoTier(t *testing.T) { invalidNpErr := fmt.Errorf("ANNP tier not mutated to default tier") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-no-tier"). + builder = builder.SetName(getNS("x"), "anp-no-tier"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(10.0) annp := builder.Get() - log.Debugf("creating ANNP %v", annp.Name) annp, err := k8sUtils.CreateOrUpdateANNP(annp) if err != nil { failOnError(fmt.Errorf("ANNP create failed %v", err), t) @@ -228,7 +271,6 @@ func testCreateValidationInvalidACNP(t *testing.T) { SetPriority(1.0). SetTier("no-exist") acnp := builder.Get() - log.Debugf("creating ACNP %v", acnp.Name) if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err == nil { // Above creation of ACNP must fail as it is an invalid spec. failOnError(invalidNpErr, t) @@ -242,14 +284,14 @@ func testUpdateValidationInvalidACNP(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(1.0) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, - nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) acnp := builder.Get() if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err != nil { failOnError(fmt.Errorf("create ACNP acnp-applied-to-update failed: %v", err), t) } builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, - nil, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil) acnp = builder.Get() if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err == nil { // Above update of ACNP must fail as it is an invalid spec. @@ -261,7 +303,7 @@ func testUpdateValidationInvalidACNP(t *testing.T) { func testCreateValidationInvalidANNP(t *testing.T) { invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy with non-exist tier accepted") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-no-priority"). + builder = builder.SetName(getNS("x"), "annp-no-priority"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(1.0). SetTier("non-exist") @@ -276,7 +318,7 @@ func testCreateValidationInvalidANNP(t *testing.T) { func testUpdateValidationInvalidANNP(t *testing.T) { invalidNpErr := fmt.Errorf("invalid Antrea NetworkPolicy appliedTo set in both spec and rules accepted") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-applied-to-update"). + builder = builder.SetName(getNS("x"), "annp-applied-to-update"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). 
SetPriority(1.0) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, nil, @@ -343,7 +385,7 @@ func testCreateValidationInvalidCG(t *testing.T) { cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName("cg-mix-peer"). SetPodSelector(map[string]string{"pod": "a"}, nil). - SetServiceReference("svc", namespaces["x"]) + SetServiceReference("svc", getNS("x")) cg := cgBuilder.Get() if _, err := k8sUtils.CreateOrUpdateCG(cg); err == nil { // Above creation of ClusterGroup must fail as it is an invalid spec. @@ -360,7 +402,7 @@ func testUpdateValidationInvalidCG(t *testing.T) { if _, err := k8sUtils.CreateOrUpdateCG(cg); err != nil { failOnError(fmt.Errorf("create ClusterGroup %s failed: %v", cg.Name, err), t) } - cgBuilder.SetServiceReference("svc", namespaces["x"]) + cgBuilder.SetServiceReference("svc", getNS("x")) cg = cgBuilder.Get() if _, err := k8sUtils.CreateOrUpdateCG(cg); err == nil { // Above update of ClusterGroup must fail as it is an invalid spec. @@ -372,9 +414,9 @@ func testUpdateValidationInvalidCG(t *testing.T) { func testCreateValidationInvalidGroup(t *testing.T) { invalidErr := fmt.Errorf("Group using podSelecter and serviceReference together created") gBuilder := &GroupSpecBuilder{} - gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(namespaces["x"]). + gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(getNS("x")). SetPodSelector(map[string]string{"pod": "a"}, nil). - SetServiceReference("svc", namespaces["x"]) + SetServiceReference("svc", getNS("x")) g := gBuilder.Get() if _, err := k8sUtils.CreateOrUpdateGroup(g); err == nil { // Above creation of Group must fail as it is an invalid spec. @@ -385,13 +427,13 @@ func testCreateValidationInvalidGroup(t *testing.T) { func testUpdateValidationInvalidGroup(t *testing.T) { invalidErr := fmt.Errorf("Group using podSelecter and serviceReference together updated") gBuilder := &GroupSpecBuilder{} - gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(namespaces["x"]). + gBuilder = gBuilder.SetName("g-mix-peer").SetNamespace(getNS("x")). SetPodSelector(map[string]string{"pod": "a"}, nil) g := gBuilder.Get() if _, err := k8sUtils.CreateOrUpdateGroup(g); err != nil { failOnError(fmt.Errorf("create Group %s/%s failed: %v", g.Namespace, g.Name, err), t) } - gBuilder.SetServiceReference("svc", namespaces["x"]) + gBuilder.SetServiceReference("svc", getNS("x")) g = gBuilder.Get() if _, err := k8sUtils.CreateOrUpdateGroup(g); err == nil { // Above update of Group must fail as it is an invalid spec. @@ -407,13 +449,13 @@ func testACNPAllowXBtoA(t *testing.T) { builder = builder.SetName("acnp-allow-xb-to-a"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("z", "a"), Connected) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -439,33 +481,33 @@ func testACNPAllowXBtoA(t *testing.T) { // the client Pod and uses it in sourcePort and sourceEndPort of an ACNP rule to verify that // packets can be matched by source port. func testACNPSourcePort(t *testing.T) { - portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(namespaces["x"], "a") + portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(getNS("x"), "a") failOnError(err, t) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-source-port"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-source-port"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-source-port"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) + reachability.Expect(Pod(getNS("x")+"/b"), Pod(getNS("x")+"/a"), Dropped) + reachability.Expect(Pod(getNS("x")+"/b"), Pod(getNS("y")+"/a"), Dropped) + reachability.Expect(Pod(getNS("x")+"/b"), Pod(getNS("z")+"/a"), Dropped) // After adding the dst port constraint of port 80, traffic on port 81 should not be affected. updatedReachability := NewReachability(allPods, Connected) @@ -511,12 +553,12 @@ func testACNPAllowXBtoYA(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-allow-xb-to-ya"). SetPriority(2.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}}) - builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("y")}}}) + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Connected) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -543,25 +585,25 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-priority2"). SetPriority(2). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority1"). SetPriority(1). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Ingress from ns:z to x/a will be dropped since acnp-priority1 has higher precedence. reachabilityBothACNP := NewReachability(allPods, Dropped) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Connected) - reachabilityBothACNP.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Connected) + reachabilityBothACNP.Expect(getPod("z", "a"), getPod("x", "b"), Connected) + reachabilityBothACNP.Expect(getPod("z", "a"), getPod("x", "c"), Connected) + reachabilityBothACNP.Expect(getPod("z", "b"), getPod("x", "b"), Connected) + reachabilityBothACNP.Expect(getPod("z", "b"), getPod("x", "c"), Connected) + reachabilityBothACNP.Expect(getPod("z", "c"), getPod("x", "b"), Connected) + reachabilityBothACNP.Expect(getPod("z", "c"), getPod("x", "c"), Connected) reachabilityBothACNP.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -594,11 +636,11 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-allow-x-ingress-y-egress-z"). SetPriority(1.1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Connected) testStep := []*TestStep{ @@ -632,14 +674,14 @@ func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-deny-a-to-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -664,14 +706,14 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-ingress-to-x"). SetPriority(1.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "drop-all-ingress", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectAllIngress(Pod(namespaces["x"]+"/a"), Dropped) - reachability.ExpectAllIngress(Pod(namespaces["x"]+"/b"), Dropped) - reachability.ExpectAllIngress(Pod(namespaces["x"]+"/c"), Dropped) + reachability.ExpectAllIngress(getPod("x", "a"), Dropped) + reachability.ExpectAllIngress(getPod("x", "b"), Dropped) + reachability.ExpectAllIngress(getPod("x", "c"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { @@ -696,18 +738,18 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-ingress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) - reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) - reachability1.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Dropped) + reachability1.Expect(getPod("z", "a"), getPod("x", "a"), Dropped) + reachability1.Expect(getPod("z", "b"), getPod("x", "a"), Dropped) + reachability1.Expect(getPod("z", "c"), getPod("x", "a"), Dropped) + reachability1.Expect(getPod("z", "a"), getPod("y", "a"), Dropped) + reachability1.Expect(getPod("z", "b"), getPod("y", "a"), Dropped) + reachability1.Expect(getPod("z", "c"), getPod("y", "a"), Dropped) + reachability1.Expect(getPod("z", "b"), getPod("z", "a"), Dropped) + reachability1.Expect(getPod("z", "c"), getPod("z", "a"), Dropped) reachability2 := NewReachability(allPods, Connected) @@ -742,18 +784,18 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) { cgName := "cg-pods-ya" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil). SetPodSelector(map[string]string{"pod": "a"}, nil) port81Name := "serve-81" builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-ya-from-xb"). SetPriority(2.0). SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -779,18 +821,18 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) { cgName := "cg-pods-xb" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["x"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("x")}, nil). 
SetPodSelector(map[string]string{"pod": "b"}, nil) port81Name := "serve-81" builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-xb-to-ya"). SetPriority(2.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("y")}}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -818,14 +860,14 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-a-to-z"). SetPriority(1.0) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -848,19 +890,19 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) { cgName := "cg-ns-z" cgBuilder := &ClusterGroupSpecBuilder{} - cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -890,20 +932,20 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/c"), namespaces["z"], Dropped) - updatedReachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Dropped) - updatedReachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/b"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("x", "c"), getNS("z"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("y", "c"), getNS("z"), Dropped) + updatedReachability.Expect(getPod("z", "c"), getPod("z", "a"), Dropped) + updatedReachability.Expect(getPod("z", "c"), getPod("z", "b"), Dropped) testStep := []*TestStep{ { "CG Pods A", @@ -933,28 +975,28 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { func testACNPClusterGroupUpdate(t *testing.T) { cgName := "cg-ns-z-then-y" cgBuilder := &ClusterGroupSpecBuilder{} - cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) // 
Update CG NS selector to group Pods from Namespace Y updatedCgBuilder := &ClusterGroupSpecBuilder{} - updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["y"], Dropped) - updatedReachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["y"]+"/b"), Dropped) - updatedReachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["y"]+"/c"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("z", "a"), getNS("y"), Dropped) + updatedReachability.Expect(getPod("y", "a"), getPod("y", "b"), Dropped) + updatedReachability.Expect(getPod("y", "a"), getPod("y", "c"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -985,22 +1027,22 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { cgName := "cg-pod-custom-pod-zj" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil). SetPodSelector(map[string]string{"pod": "j"}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-zj-to-xj-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["z"], "j"), + Pod: NewPod(getNS("z"), "j"), Labels: map[string]string{"pod": "j"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], "j"), + Pod: NewPod(getNS("x"), "j"), Labels: map[string]string{"pod": "j"}, }, ExpectConnectivity: Dropped, @@ -1028,7 +1070,7 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { cgName := "cg-pod-custom-pod-zk" cgBuilder := &ClusterGroupSpecBuilder{} cgBuilder = cgBuilder.SetName(cgName). - SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil). + SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil). SetPodSelector(map[string]string{"pod": "k"}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-xk-to-cg-with-zk-egress"). @@ -1036,19 +1078,19 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { SetAppliedToGroup([]ACNPAppliedToSpec{ { PodSelector: map[string]string{"pod": "k"}, - NSSelector: map[string]string{"ns": namespaces["x"]}, + NSSelector: map[string]string{"ns": getNS("x")}, }, }) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], "k"), + Pod: NewPod(getNS("x"), "k"), Labels: map[string]string{"pod": "k"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["z"], "k"), + Pod: NewPod(getNS("z"), "k"), Labels: map[string]string{"pod": "k"}, }, ExpectConnectivity: Dropped, @@ -1074,10 +1116,10 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { } func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { - podXAIP, _ := podIPs[namespaces["x"]+"/a"] - podXBIP, _ := podIPs[namespaces["x"]+"/b"] - podXCIP, _ := podIPs[namespaces["x"]+"/c"] - podZAIP, _ := podIPs[namespaces["z"]+"/a"] + podXAIP, _ := podIPs[getPodName("x", "a")] + podXBIP, _ := podIPs[getPodName("x", "b")] + podXCIP, _ := podIPs[getPodName("x", "c")] + podZAIP, _ := podIPs[getPodName("z", "a")] // There are three situations of a Pod's IP(s): // 1. Only one IPv4 address. // 2. Only one IPv6 address. 
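// [Editor's note — not part of the patch]: across these hunks, the boolean "self
// Namespace" argument to AddIngress/AddEgress is replaced by nil-able parameters,
// presumably so the builders can also express the new sameLabels Namespace peer that
// this patch adds to the CRD schema. A minimal sketch of such a peer, assuming the
// types.go hunk adds `SameLabels []string` to `PeerNamespaces` (field name inferred
// from the CRD YAML in this patch; "tenant" is a hypothetical label key):
//
//	peer := crdv1beta1.NetworkPolicyPeer{
//	    Namespaces: &crdv1beta1.PeerNamespaces{
//	        // Match Namespaces that carry the same value as the applied-to
//	        // workload's Namespace for every listed label key.
//	        SameLabels: []string{"tenant"},
//	    },
//	}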
@@ -1112,19 +1154,19 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{ { PodSelector: map[string]string{"pod": "a"}, - NSSelector: map[string]string{"ns": namespaces["y"]}, + NSSelector: map[string]string{"ns": getNS("y")}, }, }) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "c"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("z", "a"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -1146,16 +1188,16 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { func testANNPEgressRulePodsAToGrpWithPodsC(t *testing.T) { grpName := "grp-xc" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "c"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "c"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-xa-to-grp-xc-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-xa-to-grp-xc-egress"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -1178,17 +1220,17 @@ func testANNPEgressRulePodsAToGrpWithPodsC(t *testing.T) { func testANNPIngressRuleDenyGrpWithXCtoXA(t *testing.T) { grpName := "grp-pods-xb" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) port81Name := "serve-81" builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-xb-to-xa"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-xb-to-xa"). SetPriority(2.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -1211,22 +1253,22 @@ func testANNPIngressRuleDenyGrpWithXCtoXA(t *testing.T) { func testANNPGroupUpdate(t *testing.T) { grpName := "grp-pod-xc-then-pod-xb" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "c"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "c"}, nil) // Update Group Pod selector from X/C to X/B updatedGrpBuilder := &GroupSpecBuilder{} - updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-xa-to-grp-with-xc-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-xa-to-grp-with-xc-egress"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) + updatedReachability.Expect(getPod("x", "a"), getPod("x", "b"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -1257,17 +1299,17 @@ func testANNPGroupUpdate(t *testing.T) { func testANNPAppliedToDenyXBtoGrpWithXA(t *testing.T) { grpName := "grp-pods-ya" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "a"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "a"}, nil) port81Name := "serve-81" builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-xa-from-xb"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-xa-from-xb"). SetPriority(2.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grpName}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ @@ -1292,15 +1334,15 @@ func testANNPAppliedToDenyXBtoGrpWithXA(t *testing.T) { func testANNPAppliedToRuleGrpWithPodsAToPodsC(t *testing.T) { grpName := "grp-pods-a" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "a"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "a"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-a-to-c"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-a-to-c"). SetPriority(1.0) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, nil, nil, nil, nil, []ANNPAppliedToSpec{{Group: grpName}}, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -1322,22 +1364,22 @@ func testANNPAppliedToRuleGrpWithPodsAToPodsC(t *testing.T) { func testANNPGroupUpdateAppliedTo(t *testing.T) { grpName := "grp-pods-xa-then-xb" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "a"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "a"}, nil) // Update GRP Pod selector to group Pods x/b updatedGrpBuilder := &GroupSpecBuilder{} - updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil) + updatedGrpBuilder = updatedGrpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-xc-to-xa-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-xc-to-xa-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grpName}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) + updatedReachability.Expect(getPod("x", "b"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { "GRP Pods X/C", @@ -1367,9 +1409,9 @@ func testANNPGroupUpdateAppliedTo(t *testing.T) { func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { grpName := "grp-pod-custom-pod-xj" grpBuilder := &GroupSpecBuilder{} - grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "j"}, nil) + grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "j"}, nil) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-deny-grp-with-xj-to-xd-egress"). + builder = builder.SetName(getNS("x"), "annp-deny-grp-with-xj-to-xd-egress"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grpName}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "d"}, nil, nil, @@ -1377,11 +1419,11 @@ func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], "j"), + Pod: NewPod(getNS("x"), "j"), Labels: map[string]string{"pod": "j"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], "d"), + Pod: NewPod(getNS("x"), "d"), Labels: map[string]string{"pod": "d"}, }, ExpectConnectivity: Dropped, @@ -1406,17 +1448,17 @@ func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { } func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) grp1Name, grp2Name := "grp-svc1", "grp-svc2" grpBuilder1 := &GroupSpecBuilder{} - grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1") + grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1") grpBuilder2 := &GroupSpecBuilder{} - grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc2") + grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc2") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) + builder = builder.SetName(getNS("x"), "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, 
grp2Name, "") @@ -1425,11 +1467,11 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["x"], svc2PodName), + Pod: NewPod(getNS("x"), svc2PodName), Labels: map[string]string{"pod": svc2PodName, "app": "b"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], svc1PodName), + Pod: NewPod(getNS("x"), svc1PodName), Labels: map[string]string{"pod": svc1PodName, "app": "a"}, }, ExpectConnectivity: Dropped, @@ -1438,7 +1480,7 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { } reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) testStep := &TestStep{ "Port 80 updated", reachability, @@ -1457,8 +1499,8 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { } func testANNPGroupServiceRefDelete(t *testing.T) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) k8sUtils.CreateOrUpdateService(svc1) failOnError(waitForResourceReady(t, timeout, svc1), t) k8sUtils.CreateOrUpdateService(svc2) @@ -1466,9 +1508,9 @@ func testANNPGroupServiceRefDelete(t *testing.T) { grp1Name, grp2Name := "grp-svc1", "grp-svc2" grpBuilder1 := &GroupSpecBuilder{} - grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1") + grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1") grpBuilder2 := &GroupSpecBuilder{} - grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc2") + grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc2") grp1 := grpBuilder1.Get() k8sUtils.CreateOrUpdateGroup(grp1) failOnError(waitForResourceReady(t, timeout, grp1), t) @@ -1477,7 +1519,7 @@ func testANNPGroupServiceRefDelete(t *testing.T) { failOnError(waitForResourceReady(t, timeout, grp2), t) builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["x"], "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) + builder = builder.SetName(getNS("x"), "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grp2Name, "") annp := builder.Get() @@ -1485,7 +1527,7 @@ func testANNPGroupServiceRefDelete(t *testing.T) { failOnError(waitForResourceReady(t, timeout, annp), t) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ := reachability.Summary() if wrong != 0 { @@ -1508,23 +1550,23 @@ func testANNPGroupServiceRefDelete(t *testing.T) { } func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 
80, map[string]string{"app": "a"}, nil)
- svc2 := k8sUtils.BuildService("svc2", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil)
+ svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil)
+ svc2 := k8sUtils.BuildService("svc2", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil)

 grp1Name, grp2Name := "grp-svc1", "grp-svc2"
 grpBuilder1 := &GroupSpecBuilder{}
- grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1")
+ grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1")
 grpBuilder2 := &GroupSpecBuilder{}
- grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc2")
+ grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc2")

 builder := &AntreaNetworkPolicySpecBuilder{}
- builder = builder.SetName(namespaces["x"], "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}})
+ builder = builder.SetName(getNS("x"), "annp-grp-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ANNPAppliedToSpec{{Group: grp1Name}})
 builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
 nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grp2Name, "")

 // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace x.
 reachability := NewReachability(allPods, Connected)
- reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
+ reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped)
 testStep1 := &TestStep{
 "Port 80",
 reachability,
@@ -1536,13 +1578,13 @@ func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) {
 }

 // Test update selector of Service referred in grp-svc1, and update serviceReference of grp-svc2.
- svc1Updated := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil)
- svc3 := k8sUtils.BuildService("svc3", namespaces["x"], 80, 80, map[string]string{"app": "c"}, nil)
- grpBuilder2Updated := grpBuilder2.SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc3")
+ svc1Updated := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil)
+ svc3 := k8sUtils.BuildService("svc3", getNS("x"), 80, 80, map[string]string{"app": "c"}, nil)
+ grpBuilder2Updated := grpBuilder2.SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc3")

 // Pods backing svc1 (label pod=b) in namespace x should not allow ingress from Pods backing svc3 (label pod=c) in namespace x.
 reachability2 := NewReachability(allPods, Connected)
- reachability2.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped)
+ reachability2.Expect(getPod("x", "c"), getPod("x", "b"), Dropped)
 testStep2 := &TestStep{
 "Port 80 updated",
 reachability2,
@@ -1561,8 +1603,8 @@ func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) {
 }

 func testANNPGroupRefRuleIPBlocks(t *testing.T) {
- podXBIP, _ := podIPs[namespaces["x"]+"/b"]
- podXCIP, _ := podIPs[namespaces["x"]+"/c"]
+ podXBIP, _ := podIPs[getPodName("x", "b")]
+ podXCIP, _ := podIPs[getPodName("x", "c")]
 // There are three situations of a Pod's IP(s):
 // 1. Only one IPv4 address.
 // 2. Only one IPv6 address.
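// [Editor's note — not part of the patch]: the "three situations" enumeration above
// is completed by context lines elided from this hunk; the test then wraps each Pod
// IP in a single-address ipBlock. A hedged sketch of that conversion (the helper
// name and exact logic are assumptions, not the test's verbatim code):
//
//	// ipToHostCIDR converts one Pod IP into a host-route CIDR usable in an
//	// ipBlock: a /32 for IPv4 addresses, a /128 for IPv6 addresses.
//	func ipToHostCIDR(ip string) string {
//	    if strings.Contains(ip, ":") { // only IPv6 literals contain colons
//	        return ip + "/128"
//	    }
//	    return ip + "/32"
//	}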
@@ -1582,18 +1624,18 @@ func testANNPGroupRefRuleIPBlocks(t *testing.T)
 grpName := "grp-ipblocks-pod-xb-xc"
 grpBuilder := &GroupSpecBuilder{}
- grpBuilder = grpBuilder.SetName(grpName).SetNamespace(namespaces["x"]).SetIPBlocks(ipBlock)
+ grpBuilder = grpBuilder.SetName(grpName).SetNamespace(getNS("x")).SetIPBlocks(ipBlock)

 builder := &AntreaNetworkPolicySpecBuilder{}
- builder = builder.SetName(namespaces["x"], "annp-deny-xb-xc-ips-ingress-for-xa").
+ builder = builder.SetName(getNS("x"), "annp-deny-xb-xc-ips-ingress-for-xa").
 SetPriority(1.0).
 SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
 builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
 nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpName, "")

 reachability := NewReachability(allPods, Connected)
- reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
- reachability.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped)
+ reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped)
+ reachability.Expect(getPod("x", "c"), getPod("x", "a"), Dropped)
 testStep := []*TestStep{
 {
 "Port 80",
@@ -1612,21 +1654,21 @@ func testANNPGroupRefRuleIPBlocks(t *testing.T) {
 }

 func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) {
- svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil)
+ svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil)
 svc1PodName := randName("test-pod-svc1-")
 grp1Name, grp2Name, grp3Name := "grp-svc-x-a", "grp-select-x-b", "grp-select-x-c"
 grpBuilder1 := &GroupSpecBuilder{}
- grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(namespaces["x"]).SetServiceReference(namespaces["x"], "svc1")
+ grpBuilder1 = grpBuilder1.SetName(grp1Name).SetNamespace(getNS("x")).SetServiceReference(getNS("x"), "svc1")
 grpBuilder2 := &GroupSpecBuilder{}
- grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "b"}, nil)
+ grpBuilder2 = grpBuilder2.SetName(grp2Name).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "b"}, nil)
 grpBuilder3 := &GroupSpecBuilder{}
- grpBuilder3 = grpBuilder3.SetName(grp3Name).SetNamespace(namespaces["x"]).SetPodSelector(map[string]string{"pod": "c"}, nil)
+ grpBuilder3 = grpBuilder3.SetName(grp3Name).SetNamespace(getNS("x")).SetPodSelector(map[string]string{"pod": "c"}, nil)
 grpNestedName := "grp-nested"
 grpBuilderNested := &GroupSpecBuilder{}
- grpBuilderNested = grpBuilderNested.SetName(grpNestedName).SetNamespace(namespaces["x"]).SetChildGroups([]string{grp1Name, grp3Name})
+ grpBuilderNested = grpBuilderNested.SetName(grpNestedName).SetNamespace(getNS("x")).SetChildGroups([]string{grp1Name, grp3Name})

 builder := &AntreaNetworkPolicySpecBuilder{}
- builder = builder.SetName(namespaces["x"], "annp-nested-grp").SetPriority(1.0).
+ builder = builder.SetName(getNS("x"), "annp-nested-grp").SetPriority(1.0).
 SetAppliedToGroup([]ANNPAppliedToSpec{{}}).
 AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
 nil, nil, nil, nil, crdv1beta1.RuleActionDrop, grpNestedName, "")
@@ -1635,7 +1677,7 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) {
 // Note that in this testStep grp3 will not be created yet, so even though grp-nested selects grp1 and
 // grp3 as childGroups, only members of grp1 will be included at this time.
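// [Editor's note — not part of the patch]: "nested" Groups reference other Groups by
// name via childGroups. A hedged sketch of the object the builder above produces
// (API version and field names are assumptions based on the Group CRD, not verbatim
// test output):
//
//	group := &crdv1alpha3.Group{
//	    ObjectMeta: metav1.ObjectMeta{Name: "grp-nested", Namespace: getNS("x")},
//	    Spec: crdv1alpha3.GroupSpec{
//	        // Members of the child Groups are unioned into this Group.
//	        ChildGroups: []crdv1alpha3.ClusterGroupReference{"grp-svc-x-a", "grp-select-x-c"},
//	    },
//	}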
reachability := NewReachability(allPods, Connected)
- reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["x"], Dropped)
+ reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("x"), Dropped)
 reachability.ExpectSelf(allPods, Connected)

 testStep1 := &TestStep{
@@ -1653,18 +1695,18 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) {
 grpBuilderNested = grpBuilderNested.SetChildGroups([]string{grp1Name, grp2Name, grp3Name})
 // In addition to x/a, all traffic from x/b to Namespace x should also be denied.
 reachability2 := NewReachability(allPods, Connected)
- reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["x"], Dropped)
- reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["x"], Dropped)
+ reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("x"), Dropped)
+ reachability2.ExpectEgressToNamespace(getPod("x", "b"), getNS("x"), Dropped)
 reachability2.ExpectSelf(allPods, Connected)
 // New member in grp-svc-x-a should be reflected in grp-nested as well.
 cp := []*CustomProbe{
 {
 SourcePod: CustomPod{
- Pod: NewPod(namespaces["x"], svc1PodName),
+ Pod: NewPod(getNS("x"), svc1PodName),
 Labels: map[string]string{"pod": svc1PodName, "app": "a"},
 },
 DestPod: CustomPod{
- Pod: NewPod(namespaces["x"], "test-add-pod-ns-x"),
+ Pod: NewPod(getNS("x"), "test-add-pod-ns-x"),
 Labels: map[string]string{"pod": "test-add-pod-ns-x"},
 },
 ExpectConnectivity: Dropped,
@@ -1684,9 +1726,9 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) {
 // In this testStep grp3 is created. Its members should be reflected in grp-nested,
 // and as a result, all traffic from x/c to Namespace x should be denied as well.
 reachability3 := NewReachability(allPods, Connected)
- reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["x"], Dropped)
- reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["x"], Dropped)
- reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["x"], Dropped)
+ reachability3.ExpectEgressToNamespace(getPod("x", "a"), getNS("x"), Dropped)
+ reachability3.ExpectEgressToNamespace(getPod("x", "b"), getNS("x"), Dropped)
+ reachability3.ExpectEgressToNamespace(getPod("x", "c"), getNS("x"), Dropped)
 reachability3.ExpectSelf(allPods, Connected)
 testStep3 := &TestStep{
 "Port 80 updated",
@@ -1712,36 +1754,36 @@ func testBaselineNamespaceIsolation(t *testing.T) {
 nsExpOtherThanX := metav1.LabelSelectorRequirement{
 Key: "ns",
 Operator: metav1.LabelSelectorOpNotIn,
- Values: []string{namespaces["x"]},
+ Values: []string{getNS("x")},
 }
 builder = builder.SetName("acnp-baseline-isolate-ns-x").
 SetTier("baseline").
 SetPriority(1.0).
- SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false,
- nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}})
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, nil,
+ nil, nil, crdv1beta1.RuleActionDrop, "", "", nil)
 // create a K8s NetworkPolicy for Pods in namespace x to allow ingress traffic from Pods in the same namespace,
 // as well as from the y/a Pod.
It should open up ingress from y/a since it's evaluated before the baseline tier. k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], "allow-ns-x-and-y-a"). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("x"), "allow-ns-x-and-y-a"). SetTypeIngress(). AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, - nil, map[string]string{"ns": namespaces["x"]}, nil, nil). + nil, map[string]string{"ns": getNS("x")}, nil, nil). AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, - map[string]string{"pod": "a"}, map[string]string{"ns": namespaces["y"]}, nil, nil) + map[string]string{"pod": "a"}, map[string]string{"ns": getNS("y")}, nil, nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/a"), Dropped) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/b"), namespaces["z"], Dropped) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachability.Expect(Pod(namespaces["y"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "a"), Dropped) + reachability.Expect(getPod("y", "c"), getPod("x", "a"), Dropped) + reachability.ExpectIngressFromNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "b"), Dropped) + reachability.Expect(getPod("y", "c"), getPod("x", "b"), Dropped) + reachability.ExpectIngressFromNamespace(getPod("x", "b"), getNS("z"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "c"), Dropped) + reachability.Expect(getPod("y", "c"), getPod("x", "c"), Dropped) + reachability.ExpectIngressFromNamespace(getPod("x", "c"), getNS("z"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -1758,7 +1800,7 @@ func testBaselineNamespaceIsolation(t *testing.T) { } executeTests(t, testCase) // Cleanup the K8s NetworkPolicy created for this test. - failOnError(k8sUtils.CleanNetworkPolicies(map[string]string{"x": namespaces["x"]}), t) + failOnError(k8sUtils.CleanNetworkPolicies(map[string]TestNamespaceMeta{"x": {Name: getNS("x")}}), t) time.Sleep(networkPolicyDelay) } @@ -1768,43 +1810,43 @@ func testACNPPriorityOverride(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-priority1"). SetPriority(1.001). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Highest priority. Drops traffic from z/b to x/a. 
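// [Editor's note — not part of the patch]: for ACNPs in the same tier, a lower
// spec.priority value means higher precedence, so acnp-priority1 (1.001) beats
// acnp-priority2 (1.002), which in turn beats acnp-priority3 (1.003). An
// illustrative comparison, not code from this patch:
//
//	// higherPrecedence reports whether priority a wins over priority b for two
//	// policies in the same tier.
//	func higherPrecedence(a, b float64) bool {
//	    return a < b // lower value = higher precedence
//	}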
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority2"). SetPriority(1.002). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-priority3"). SetPriority(1.003). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority. Drops traffic from z to x. - builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - 
reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "a"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { @@ -1843,45 +1885,45 @@ func testACNPTierOverride(t *testing.T) { builder1 = builder1.SetName("acnp-tier-emergency"). SetTier("emergency"). SetPriority(100). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Highest priority tier. Drops traffic from z/b to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-securityops"). SetTier("securityops"). SetPriority(10). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-tier-application"). SetTier("application"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from z to x. 
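// [Editor's note — not part of the patch]: this test exercises Antrea's static tier
// ordering, which is applied before per-policy priorities are compared. From highest
// to lowest precedence (K8s NetworkPolicies are evaluated after the application tier,
// and the baseline tier last):
//
//	var tierPrecedence = []string{
//	    "emergency", "securityops", "networkops", "platform", "application", "baseline",
//	}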
- builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityAllACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "a"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityAllACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { @@ -1927,27 +1969,27 @@ func testACNPCustomTiers(t *testing.T) { builder1 = builder1.SetName("acnp-tier-high"). SetTier("high-priority"). SetPriority(100). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from z to x/a. 
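// [Editor's note — not part of the patch]: the "high-priority" and "low-priority"
// tiers used below are custom Tier CRs created elsewhere in this test file. A hedged
// sketch of one such Tier (field names follow the Tier CRD schema; treat this as an
// illustration, not the test's actual setup code):
//
//	tier := &crdv1beta1.Tier{
//	    ObjectMeta: metav1.ObjectMeta{Name: "high-priority"},
//	    Spec:       crdv1beta1.TierSpec{Priority: 5}, // lower value = higher precedence
//	}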
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-low"). SetTier("low-priority"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from z to x. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/b"), Dropped) - reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["x"]+"/c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "b"), getPod("x", "c"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "b"), Dropped) + reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { "Two Policies in different tiers", @@ -1975,23 +2017,23 @@ func testACNPPriorityConflictingRule(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-drop"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow"). SetPriority(2). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. 
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) - reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["x"], Dropped) - reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/b"), namespaces["x"], Dropped) - reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/c"), namespaces["x"], Dropped) + reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "a"), getNS("x"), Dropped) + reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "b"), getNS("x"), Dropped) + reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "c"), getNS("x"), Dropped) testStep := []*TestStep{ { "Both ACNP", @@ -2016,29 +2058,29 @@ func testACNPRulePriority(t *testing.T) { // acnp-deny will apply to all pods in namespace x builder1 = builder1.SetName("acnp-deny"). SetPriority(5). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-allow - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} // acnp-allow will also apply to all pods in namespace x builder2 = builder2.SetName("acnp-allow"). SetPriority(5). 
- SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
- // This rule should take no effect as it will be overridden by the first rule of cnp-deny
- builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
- nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}})
+ builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")},
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ // This rule should take no effect as it will be overridden by the first rule of cnp-deny
+ builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")},
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil)

 // Only egress from pods in namespace x to namespace y should be denied
 reachabilityBothACNP := NewReachability(allPods, Connected)
- reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/a"), namespaces["x"], Dropped)
- reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/b"), namespaces["x"], Dropped)
- reachabilityBothACNP.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/c"), namespaces["x"], Dropped)
+ reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "a"), getNS("x"), Dropped)
+ reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "b"), getNS("x"), Dropped)
+ reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "c"), getNS("x"), Dropped)
 testStep := []*TestStep{
 {
 "Both ACNP",
@@ -2062,14 +2104,14 @@ func testACNPPortRange(t *testing.T) {
 builder = builder.SetName("acnp-deny-a-to-z-egress-port-range").
 SetPriority(1.0).
 SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)
+ builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")},
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)

 reachability := NewReachability(allPods, Connected)
- reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
- reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Dropped)
- reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Dropped)
- reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Dropped)
+ reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped)
+ reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Dropped)
+ reachability.Expect(getPod("z", "a"), getPod("z", "b"), Dropped)
+ reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped)
 testSteps := []*TestStep{
 {
 fmt.Sprintf("ACNP Drop Ports 8080:8082"),
@@ -2094,14 +2136,14 @@ func testACNPRejectEgress(t *testing.T) {
 builder = builder.SetName("acnp-reject-a-to-z-egress").
 SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) - reachability.ExpectEgressToNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Rejected) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/b"), Rejected) - reachability.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["z"]+"/c"), Rejected) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Rejected) + reachability.ExpectEgressToNamespace(getPod("y", "a"), getNS("z"), Rejected) + reachability.Expect(getPod("z", "a"), getPod("z", "b"), Rejected) + reachability.Expect(getPod("z", "a"), getPod("z", "c"), Rejected) testStep := []*TestStep{ { "Port 80", @@ -2125,14 +2167,14 @@ func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-reject-a-from-z-ingress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) - reachability.ExpectIngressFromNamespace(Pod(namespaces["y"]+"/a"), namespaces["z"], Rejected) - reachability.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["z"]+"/a"), Rejected) - reachability.Expect(Pod(namespaces["z"]+"/c"), Pod(namespaces["z"]+"/a"), Rejected) + reachability.ExpectIngressFromNamespace(getPod("x", "a"), getNS("z"), Rejected) + reachability.ExpectIngressFromNamespace(getPod("y", "a"), getNS("z"), Rejected) + reachability.Expect(getPod("z", "b"), getPod("z", "a"), Rejected) + reachability.Expect(getPod("z", "c"), getPod("z", "a"), Rejected) testStep := []*TestStep{ { "Port 80", @@ -2183,10 +2225,10 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser builder1 = builder1.SetName("acnp-reject-egress-svc-traffic"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": "agnhost-client"}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc1.Spec.Selector, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc2.Spec.Selector, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "s1"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "s2"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) acnpEgress := builder1.Get() k8sUtils.CreateOrUpdateACNP(acnpEgress) @@ -2210,8 +2252,8 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser builder2 = builder2.SetName("acnp-reject-ingress-svc-traffic"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: svc1.Spec.Selector}, {PodSelector: svc2.Spec.Selector}}) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) acnpIngress := builder2.Get() k8sUtils.CreateOrUpdateACNP(acnpIngress) @@ -2301,10 +2343,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-reject-ingress-double-dir"). SetPriority(1.0) - builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, - nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) - builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, - nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder1.Get(), testcases) @@ -2312,10 +2354,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-reject-egress-double-dir"). 
SetPriority(1.0)
- builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
- nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil)
- builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
- nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil)
+ builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
+ nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil)
+ builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
+ nil, nil, nil, nil, nil, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil)

 runTestsWithACNP(builder2.Get(), testcases)

@@ -2324,10 +2366,10 @@
 builder3 = builder3.SetName("acnp-reject-server-double-dir").
 SetPriority(1.0).
 SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}})
- builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
- nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
- builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
- nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil)

 runTestsWithACNP(builder3.Get(), testcases)

@@ -2336,10 +2378,10 @@
 builder4 = builder4.SetName("acnp-reject-client-double-dir").
 SetPriority(1.0).
 SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}})
- builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
- nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
- builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
- nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil)

 runTestsWithACNP(builder4.Get(), testcases)
 }

@@ -2347,14 +2389,14 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser
 // testANNPPortRange tests that the port range in an ANNP can work.
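
// NOTE (editor's aside, illustrative only; not part of the upstream patch):
// two mechanical rewrites repeat through all of these hunks. First,
// namespaces["x"] becomes getNS("x") and Pod(namespaces["x"]+"/a") becomes
// getPod("x", "a"). A minimal sketch of what such helpers could look like,
// assuming the old `namespaces` map is still the backing store (names and
// bodies here are hypothetical):
func getNSExample(key string) string {
	// Resolve a logical Namespace key ("x", "y", "z") to the real test Namespace name.
	return namespaces[key]
}

func getPodExample(nsKey, podName string) Pod {
	// Build the "namespace/pod" identifier consumed by the Reachability matrix.
	return Pod(getNSExample(nsKey) + "/" + podName)
}

// Second, the ACNP builder's AddIngress/AddEgress parameter lists widen: the
// old boolean self-Namespace flag (the literal `false` on removed lines) is
// replaced by pointer-typed peer-Namespaces arguments, passed as nil when
// unused. The `selfNamespace` value threaded through later hunks is
// presumably a pointer carrying the CRD's `Self` enum; an assumed shape:
var selfNamespaceExample = &crdv1beta1.PeerNamespaces{
	Match: crdv1beta1.NamespaceMatchSelf, // peer resolves to each appliedTo Pod's own Namespace
}
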
func testANNPPortRange(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "annp-deny-yb-to-xc-egress-port-range"). + builder = builder.SetName(getNS("y"), "annp-deny-yb-to-xc-egress-port-range"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}) - builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "annp-port-range") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/c"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "c"), Dropped) var testSteps []*TestStep testSteps = append(testSteps, &TestStep{ @@ -2377,14 +2419,14 @@ func testANNPPortRange(t *testing.T) { // that specifies that. Also it tests that a K8s NetworkPolicy with same appliedTo will not affect its behavior. func testANNPBasic(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np-same-name"). + builder = builder.SetName(getNS("y"), "np-same-name"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -2398,7 +2440,7 @@ func testANNPBasic(t *testing.T) { } // build a K8s NetworkPolicy that has the same appliedTo but allows all traffic. k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["y"], "np-same-name"). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("y"), "np-same-name"). SetPodSelector(map[string]string{"pod": "a"}) k8sNPBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil) @@ -2424,14 +2466,14 @@ func testANNPBasic(t *testing.T) { // update on the Antrea NetworkPolicy allows traffic from X/B to Y/A on port 80. func testANNPUpdate(t *testing.T, data *TestData) { builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np-before-update"). + builder = builder.SetName(getNS("y"), "np-before-update"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) annp, err := k8sUtils.CreateOrUpdateANNP(builder.Get()) failOnError(err, t) failOnError(data.waitForANNPRealized(t, annp.Namespace, annp.Name, policyRealizedTimeout), t) @@ -2443,10 +2485,10 @@ func testANNPUpdate(t *testing.T, data *TestData) { } updatedBuilder := &AntreaNetworkPolicySpecBuilder{} - updatedBuilder = updatedBuilder.SetName(namespaces["y"], "np-before-update"). + updatedBuilder = updatedBuilder.SetName(getNS("y"), "np-before-update"). SetPriority(1.0). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - updatedBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + updatedBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "") updatedReachability := NewReachability(allPods, Connected) annp, err = k8sUtils.CreateOrUpdateANNP(updatedBuilder.Get()) @@ -2467,22 +2509,22 @@ func testANNPUpdate(t *testing.T, data *TestData) { func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { tempLabel := randName("temp-") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(namespaces["y"], "np-multiple-appliedto").SetPriority(1.0) + builder = builder.SetName(getNS("y"), "np-multiple-appliedto").SetPriority(1.0) // Make it apply to an extra dummy AppliedTo to ensure it handles multiple AppliedToGroups correctly. // See https://github.com/antrea-io/antrea/issues/2083. 
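
// NOTE (editor's aside, illustrative only; not part of the upstream patch):
// the dummy AppliedTo below selects a temporary label that no Pod carries
// yet, so the policy must be realized correctly even while one of its
// AppliedToGroups is empty. The test then opts a Pod in by patching its
// labels, roughly (this mirrors the Update call further below):
//
//	podYC.Labels[tempLabel] = ""
//	k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{})
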
if singleRule { builder.SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}, {PodSelector: map[string]string{tempLabel: ""}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "") } else { - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}, crdv1beta1.RuleActionDrop, "", "") - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{tempLabel: ""}}}, crdv1beta1.RuleActionDrop, "", "") } reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) annp, err := k8sUtils.CreateOrUpdateANNP(builder.Get()) failOnError(err, t) @@ -2495,7 +2537,7 @@ func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { } t.Logf("Making the Policy apply to y/c by labeling it with the temporary label that matches the dummy AppliedTo") - podYC, err := k8sUtils.GetPodByLabel(namespaces["y"], "c") + podYC, err := k8sUtils.GetPodByLabel(getNS("y"), "c") if err != nil { t.Errorf("Failed to get Pod in Namespace y with label 'pod=c': %v", err) } @@ -2503,8 +2545,8 @@ func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { podYC, err = k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{}) assert.NoError(t, err) reachability = NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/c"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "c"), Dropped) time.Sleep(networkPolicyDelay) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() @@ -2518,7 +2560,7 @@ func testANNPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { _, err = k8sUtils.clientset.CoreV1().Pods(podYC.Namespace).Update(context.TODO(), podYC, metav1.UpdateOptions{}) assert.NoError(t, err) reachability = NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) time.Sleep(networkPolicyDelay) k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() @@ -2621,9 +2663,9 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = 
builder.SetName(npName). SetPriority(1.0). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) builder.AddEgressLogging(logLabel) npRef := fmt.Sprintf("AntreaClusterNetworkPolicy:%s", npName) @@ -2631,7 +2673,7 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { failOnError(err, t) failOnError(data.waitForACNPRealized(t, acnp.Name, policyRealizedTimeout), t) - podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") + podXA, err := k8sUtils.GetPodByLabel(getNS("x"), "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } @@ -2649,9 +2691,9 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, ProtocolTCP, nil, nil) }() } - oneProbe(namespaces["x"], "a", namespaces["z"], "a") - oneProbe(namespaces["x"], "a", namespaces["z"], "b") - oneProbe(namespaces["x"], "a", namespaces["z"], "c") + oneProbe(getNS("x"), "a", getNS("z"), "a") + oneProbe(getNS("x"), "a", getNS("z"), "b") + oneProbe(getNS("x"), "a", getNS("z"), "c") wg.Wait() // nodeName is guaranteed to be set at this stage, since the framework waits for all Pods to be in Running phase @@ -2664,23 +2706,23 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { // testAuditLoggingEnableK8s tests that audit logs are generated when K8s NP is applied // tests both Allow traffic by K8s NP and Drop traffic by implicit K8s policy drop func testAuditLoggingEnableK8s(t *testing.T, data *TestData) { - failOnError(data.updateNamespaceWithAnnotations(namespaces["x"], map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) + failOnError(data.updateNamespaceWithAnnotations(getNS("x"), map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) // Add a K8s namespaced NetworkPolicy in ns x that allow ingress traffic from // Pod x/b to x/a which default denies other ingress including from Pod x/c to x/a npName := "allow-x-b-to-x-a" k8sNPBuilder := &NetworkPolicySpecBuilder{} - k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], npName). + k8sNPBuilder = k8sNPBuilder.SetName(getNS("x"), npName). SetPodSelector(map[string]string{"pod": "a"}). SetTypeIngress(). 
AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, nil, nil, nil) - npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", namespaces["x"], npName) + npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", getNS("x"), npName) knp, err := k8sUtils.CreateOrUpdateNetworkPolicy(k8sNPBuilder.Get()) failOnError(err, t) failOnError(waitForResourceReady(t, timeout, knp), t) - podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") + podXA, err := k8sUtils.GetPodByLabel(getNS("x"), "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } @@ -2702,16 +2744,16 @@ func testAuditLoggingEnableK8s(t *testing.T, data *TestData) { k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, ProtocolTCP, nil, nil) }() } - oneProbe(namespaces["x"], "b", namespaces["x"], "a", matcher1) - oneProbe(namespaces["x"], "c", namespaces["x"], "a", matcher2) + oneProbe(getNS("x"), "b", getNS("x"), "a", matcher1) + oneProbe(getNS("x"), "c", getNS("x"), "a", matcher2) wg.Wait() // nodeName is guaranteed to be set at this stage, since the framework waits for all Pods to be in Running phase nodeName := podXA.Spec.NodeName checkAuditLoggingResult(t, data, nodeName, "K8sNetworkPolicy", append(matcher1.Matchers(), matcher2.Matchers()...)) - failOnError(k8sUtils.DeleteNetworkPolicy(namespaces["x"], "allow-x-b-to-x-a"), t) - failOnError(data.UpdateNamespace(namespaces["x"], func(namespace *v1.Namespace) { + failOnError(k8sUtils.DeleteNetworkPolicy(getNS("x"), "allow-x-b-to-x-a"), t) + failOnError(data.UpdateNamespace(getNS("x"), func(namespace *v1.Namespace) { delete(namespace.Annotations, networkpolicy.EnableNPLoggingAnnotationKey) }), t) } @@ -2719,23 +2761,23 @@ func testAuditLoggingEnableK8s(t *testing.T, data *TestData) { // testAuditLoggingK8sService tests that audit logs are generated for K8s Service access // tests both Allow traffic by K8s NP and Drop traffic by implicit K8s policy drop func testAuditLoggingK8sService(t *testing.T, data *TestData) { - failOnError(data.updateNamespaceWithAnnotations(namespaces["x"], map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) + failOnError(data.updateNamespaceWithAnnotations(getNS("x"), map[string]string{networkpolicy.EnableNPLoggingAnnotationKey: "true"}), t) // Create and expose nginx service on the same node as pod x/a - podXA, err := k8sUtils.GetPodByLabel(namespaces["x"], "a") + podXA, err := k8sUtils.GetPodByLabel(getNS("x"), "a") if err != nil { t.Errorf("Failed to get Pod in Namespace x with label 'pod=a': %v", err) } serverNode := podXA.Spec.NodeName serviceName := "nginx" - serverPodName, serverIP, nginxCleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, namespaces["x"], false) + serverPodName, serverIP, nginxCleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, getNS("x"), false) defer nginxCleanupFunc() serverPort := int32(80) ipFamily := v1.IPv4Protocol - if IPFamily(podIPs[namespaces["x"]+"/a"][0]) == "v6" { + if IPFamily(podIPs[getPodName("x", "a")][0]) == "v6" { ipFamily = v1.IPv6Protocol } - service, err := data.CreateService(serviceName, namespaces["x"], serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, v1.ServiceTypeClusterIP, &ipFamily) + service, err := data.CreateService(serviceName, getNS("x"), serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, v1.ServiceTypeClusterIP, &ipFamily) if err != nil { t.Fatalf("Error when creating nginx service: %v", err) } @@ -2745,12 
+2787,12 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) {
 // Pod x/a to service nginx which default denies other ingress including from Pod x/b to service nginx
 npName := "allow-xa-to-service"
 k8sNPBuilder := &NetworkPolicySpecBuilder{}
- k8sNPBuilder = k8sNPBuilder.SetName(namespaces["x"], npName).
+ k8sNPBuilder = k8sNPBuilder.SetName(getNS("x"), npName).
 SetPodSelector(map[string]string{"app": serviceName}).
 SetTypeIngress().
 AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "a"}, nil, nil, nil)

- npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", namespaces["x"], npName)
+ npRef := fmt.Sprintf("K8sNetworkPolicy:%s/%s", getNS("x"), npName)
 knp, err := k8sUtils.CreateOrUpdateNetworkPolicy(k8sNPBuilder.Get())
 failOnError(err, t)
@@ -2761,7 +2803,7 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) {
 // matcher2 is for connections dropped by the isolated behavior of the K8s NP
 matcher2 := NewAuditLogMatcher("K8sNetworkPolicy", "", "Ingress", "Drop")

- appliedToRef := fmt.Sprintf("%s/%s", namespaces["x"], serverPodName)
+ appliedToRef := fmt.Sprintf("%s/%s", getNS("x"), serverPodName)

 // generate some traffic that wget the nginx service
 var wg sync.WaitGroup
@@ -2776,31 +2818,31 @@ func testAuditLoggingK8sService(t *testing.T, data *TestData) {
 }()
 }
 }
- oneProbe(namespaces["x"], "a", matcher1)
- oneProbe(namespaces["x"], "b", matcher2)
+ oneProbe(getNS("x"), "a", matcher1)
+ oneProbe(getNS("x"), "b", matcher2)
 wg.Wait()

 checkAuditLoggingResult(t, data, serverNode, "K8sNetworkPolicy", append(matcher1.Matchers(), matcher2.Matchers()...))

- failOnError(k8sUtils.DeleteNetworkPolicy(namespaces["x"], npName), t)
- failOnError(data.UpdateNamespace(namespaces["x"], func(namespace *v1.Namespace) {
+ failOnError(k8sUtils.DeleteNetworkPolicy(getNS("x"), npName), t)
+ failOnError(data.UpdateNamespace(getNS("x"), func(namespace *v1.Namespace) {
 delete(namespace.Annotations, networkpolicy.EnableNPLoggingAnnotationKey)
 }), t)
 }

 func testAppliedToPerRule(t *testing.T) {
 builder := &AntreaNetworkPolicySpecBuilder{}
- builder = builder.SetName(namespaces["y"], "np1").SetPriority(1.0)
+ builder = builder.SetName(getNS("y"), "np1").SetPriority(1.0)
 annpATGrp1 := ANNPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil}
 annpATGrp2 := ANNPAppliedToSpec{PodSelector: map[string]string{"pod": "b"}, PodSelectorMatchExp: nil}
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil,
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil,
 nil, nil, nil, []ANNPAppliedToSpec{annpATGrp1}, crdv1beta1.RuleActionDrop, "", "")
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, nil,
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, nil,
 nil, nil, nil, []ANNPAppliedToSpec{annpATGrp2}, crdv1beta1.RuleActionDrop, "", "")

 reachability := NewReachability(allPods, Connected)
- reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
- reachability.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/b"), Dropped)
+ reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped)
+ reachability.Expect(getPod("z", "b"), getPod("y",
"b"), Dropped) testStep := []*TestStep{ { "Port 80", @@ -2817,18 +2859,18 @@ func testAppliedToPerRule(t *testing.T) { builder2 = builder2.SetName("cnp1").SetPriority(1.0) cnpATGrp1 := ACNPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil} cnpATGrp2 := ACNPAppliedToSpec{ - PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": namespaces["y"]}, + PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": getNS("y")}, PodSelectorMatchExp: nil, NSSelectorMatchExp: nil} - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["z"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["z"]+"/b"), Pod(namespaces["y"]+"/b"), Dropped) + reachability2.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) + reachability2.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) + reachability2.Expect(getPod("x", "b"), getPod("z", "a"), Dropped) + reachability2.Expect(getPod("z", "b"), getPod("y", "b"), Dropped) testStep2 := []*TestStep{ { "Port 80", @@ -2849,23 +2891,23 @@ func testAppliedToPerRule(t *testing.T) { } func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) { - svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil) - svc2 := k8sUtils.BuildService("svc2", namespaces["y"], 80, 80, map[string]string{"app": "b"}, nil) + svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil) + svc2 := k8sUtils.BuildService("svc2", getNS("y"), 80, 80, map[string]string{"app": "b"}, nil) cg1Name, cg2Name := "cg-svc1", "cg-svc2" cgBuilder1 := &ClusterGroupSpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(namespaces["x"], "svc1") + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(getNS("x"), "svc1") cgBuilder2 := &ClusterGroupSpecBuilder{} - cgBuilder2 = cgBuilder2.SetName(cg2Name).SetServiceReference(namespaces["y"], "svc2") + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetServiceReference(getNS("y"), "svc2") builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-cg-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cg1Name}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - false, nil, 
crdv1beta1.RuleActionDrop, cg2Name, "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cg2Name, "", nil) // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace y. reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("y", "b"), getPod("x", "a"), Dropped) testStep1 := &TestStep{ "Port 80", reachability, @@ -2877,19 +2919,19 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) } // Test update selector of Service referred in cg-svc1, and update serviceReference of cg-svc2. - svc1Updated := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "b"}, nil) - svc3 := k8sUtils.BuildService("svc3", namespaces["y"], 80, 80, map[string]string{"app": "a"}, nil) + svc1Updated := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "b"}, nil) + svc3 := k8sUtils.BuildService("svc3", getNS("y"), 80, 80, map[string]string{"app": "a"}, nil) svc1PodName := randName("test-pod-svc1-") svc3PodName := randName("test-pod-svc3-") - cgBuilder2Updated := cgBuilder2.SetServiceReference(namespaces["y"], "svc3") + cgBuilder2Updated := cgBuilder2.SetServiceReference(getNS("y"), "svc3") cp := []*CustomProbe{ { SourcePod: CustomPod{ - Pod: NewPod(namespaces["y"], svc3PodName), + Pod: NewPod(getNS("y"), svc3PodName), Labels: map[string]string{"pod": svc3PodName, "app": "a"}, }, DestPod: CustomPod{ - Pod: NewPod(namespaces["x"], svc1PodName), + Pod: NewPod(getNS("x"), svc1PodName), Labels: map[string]string{"pod": svc1PodName, "app": "b"}, }, ExpectConnectivity: Dropped, @@ -2899,7 +2941,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) // Pods backing svc1 (label pod=b) in namespace x should not allow ingress from Pods backing svc3 (label pod=a) in namespace y. reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) + reachability2.Expect(getPod("y", "a"), getPod("x", "b"), Dropped) testStep2 := &TestStep{ "Port 80 updated", reachability2, @@ -2912,9 +2954,9 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builderUpdated := &ClusterNetworkPolicySpecBuilder{} builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0) - builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) + builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. 
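
// NOTE (editor's aside, illustrative only; not part of the upstream patch):
// every step in these tests follows the same default-then-override
// reachability pattern. A condensed sketch using only helpers visible in
// these hunks (the function name is hypothetical):
func exampleReachabilityCheck() {
	reach := NewReachability(allPods, Connected)              // default: every Pod pair Connected
	reach.Expect(getPod("y", "b"), getPod("x", "a"), Dropped) // override one pair
	k8sUtils.Validate(allPods, reach, []int32{80}, ProtocolTCP)
	_, wrong, _ := reach.Summary() // wrong > 0 means observed connectivity diverged from the matrix
	_ = wrong
}
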
testStep3 := &TestStep{
@@ -2935,18 +2977,18 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData)
 }

 func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
- svc1 := k8sUtils.BuildService("svc1", namespaces["x"], 80, 80, map[string]string{"app": "a"}, nil)
+ svc1 := k8sUtils.BuildService("svc1", getNS("x"), 80, 80, map[string]string{"app": "a"}, nil)
 svc1PodName := randName("test-pod-svc1-")
 cg1Name, cg2Name, cg3Name := "cg-svc-x-a", "cg-select-y-b", "cg-select-y-c"
 cgBuilder1 := &ClusterGroupSpecBuilder{}
- cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(namespaces["x"], "svc1")
+ cgBuilder1 = cgBuilder1.SetName(cg1Name).SetServiceReference(getNS("x"), "svc1")
 cgBuilder2 := &ClusterGroupSpecBuilder{}
 cgBuilder2 = cgBuilder2.SetName(cg2Name).
- SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil).
+ SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil).
 SetPodSelector(map[string]string{"pod": "b"}, nil)
 cgBuilder3 := &ClusterGroupSpecBuilder{}
 cgBuilder3 = cgBuilder3.SetName(cg3Name).
- SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil).
+ SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil).
 SetPodSelector(map[string]string{"pod": "c"}, nil)
 cgNestedName := "cg-nested"
 cgBuilderNested := &ClusterGroupSpecBuilder{}
@@ -2954,15 +2996,15 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 builder := &ClusterNetworkPolicySpecBuilder{}
 builder = builder.SetName("cnp-nested-cg").SetPriority(1.0).
- SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}}}).
- AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil)
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("z")}}}).
+ AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil)

 // Pods in Namespace z should not allow traffic from Pods backing svc1 (label pod=a) in Namespace x.
 // Note that in this testStep cg3 will not be created yet, so even though cg-nested selects cg1 and
 // cg3 as childGroups, only members of cg1 will be included at this time.
 reachability := NewReachability(allPods, Connected)
- reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+ reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped)

 testStep1 := &TestStep{
 "Port 80",
@@ -2979,17 +3021,17 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg1Name, cg2Name, cg3Name})
 // In addition to x/a, all traffic from y/b to Namespace z should also be denied.
 reachability2 := NewReachability(allPods, Connected)
- reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
- reachability2.ExpectEgressToNamespace(Pod(namespaces["y"]+"/b"), namespaces["z"], Dropped)
+ reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped)
+ reachability2.ExpectEgressToNamespace(getPod("y", "b"), getNS("z"), Dropped)
 // New member in cg-svc-x-a should be reflected in cg-nested as well.
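
// NOTE (editor's aside, illustrative only; not part of the upstream patch):
// CustomProbes create brand-new Pods during a TestStep and assert their
// connectivity, so group-membership updates are exercised rather than
// assumed. A hypothetical minimal entry (Pod names invented here):
var exampleProbe = &CustomProbe{
	SourcePod: CustomPod{
		Pod:    NewPod(getNS("x"), "new-svc1-backend"),
		Labels: map[string]string{"app": "a"}, // matches svc1's selector, so it joins cg-svc-x-a
	},
	DestPod: CustomPod{
		Pod:    NewPod(getNS("z"), "new-z-dest"),
		Labels: map[string]string{"pod": "new-z-dest"},
	},
	ExpectConnectivity: Dropped, // must be denied once the new membership propagates
}
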
cp := []*CustomProbe{
 {
 SourcePod: CustomPod{
- Pod: NewPod(namespaces["x"], svc1PodName),
+ Pod: NewPod(getNS("x"), svc1PodName),
 Labels: map[string]string{"pod": svc1PodName, "app": "a"},
 },
 DestPod: CustomPod{
- Pod: NewPod(namespaces["z"], "test-add-pod-ns-z"),
+ Pod: NewPod(getNS("z"), "test-add-pod-ns-z"),
 Labels: map[string]string{"pod": "test-add-pod-ns-z"},
 },
 ExpectConnectivity: Dropped,
@@ -3009,9 +3051,9 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 // In this testStep cg3 is created. Its members should be reflected in cg-nested
 // and as a result, all traffic from y/c to Namespace z should be denied as well.
 reachability3 := NewReachability(allPods, Connected)
- reachability3.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
- reachability3.ExpectEgressToNamespace(Pod(namespaces["y"]+"/b"), namespaces["z"], Dropped)
- reachability3.ExpectEgressToNamespace(Pod(namespaces["y"]+"/c"), namespaces["z"], Dropped)
+ reachability3.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped)
+ reachability3.ExpectEgressToNamespace(getPod("y", "b"), getNS("z"), Dropped)
+ reachability3.ExpectEgressToNamespace(getPod("y", "c"), getNS("z"), Dropped)
 testStep3 := &TestStep{
 "Port 80 updated",
 reachability3,
@@ -3030,8 +3072,8 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
 }

 func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) {
- podXAIP, _ := podIPs[namespaces["x"]+"/a"]
- podXBIP, _ := podIPs[namespaces["x"]+"/b"]
+ podXAIP, _ := podIPs[getPodName("x", "a")]
+ podXBIP, _ := podIPs[getPodName("x", "b")]
 genCIDR := func(ip string) string {
 switch IPFamily(ip) {
 case "v4":
@@ -3062,15 +3104,15 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) {
 SetAppliedToGroup([]ACNPAppliedToSpec{
 {
 PodSelector: map[string]string{"pod": "a"},
- NSSelector: map[string]string{"ns": namespaces["y"]},
+ NSSelector: map[string]string{"ns": getNS("y")},
 },
 })
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)

 reachability := NewReachability(allPods, Connected)
- reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
- reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
+ reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped)
+ reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped)
 testStep := &TestStep{
 "Port 80",
 reachability,
@@ -3083,14 +3125,14 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) {
 cgBuilder3 := &ClusterGroupSpecBuilder{}
 cgBuilder3 = cgBuilder3.SetName(cg3Name).
- SetNamespaceSelector(map[string]string{"ns": namespaces["x"]}, nil).
+ SetNamespaceSelector(map[string]string{"ns": getNS("x")}, nil).
SetPodSelector(map[string]string{"pod": "c"}, nil) updatedCGParent := &ClusterGroupSpecBuilder{} updatedCGParent = updatedCGParent.SetName(cgParentName).SetChildGroups([]string{cg1Name, cg3Name}) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability2.Expect(Pod(namespaces["x"]+"/c"), Pod(namespaces["y"]+"/a"), Dropped) + reachability2.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability2.Expect(getPod("x", "c"), getPod("y", "a"), Dropped) testStep2 := &TestStep{ "Port 80, updated", reachability2, @@ -3114,10 +3156,10 @@ func testACNPNamespaceIsolation(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) // deny ingress traffic except from own namespace, which is always allowed. - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - true, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, selfNamespace, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Dropped) reachability.ExpectAllSelfNamespace(Connected) @@ -3136,17 +3178,17 @@ func testACNPNamespaceIsolation(t *testing.T) { SetTier("baseline"). SetPriority(1.0) builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionAllow, "", "", nil) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, - false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionDrop, "", "", nil) + selfNamespace, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + nil, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["y"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/b"), namespaces["z"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["y"], Dropped) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/c"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "b"), getNS("y"), Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "b"), getNS("z"), Dropped) + 
reachability2.ExpectEgressToNamespace(getPod("x", "c"), getNS("y"), Dropped)
+ reachability2.ExpectEgressToNamespace(getPod("x", "c"), getNS("z"), Dropped)
 testStep2 := &TestStep{
 "Port 80",
 reachability2,
@@ -3171,9 +3213,9 @@ func testACNPStrictNamespacesIsolation(t *testing.T) {
 SetPriority(1.0).
 SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}})
 builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- true, nil, crdv1beta1.RuleActionPass, "", "", nil)
- builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil,
- false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ selfNamespace, nil, crdv1beta1.RuleActionPass, "", "", nil)
+ builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil,
+ nil, nil, crdv1beta1.RuleActionDrop, "", "", nil)
 // deny ingress traffic except from own namespace, which is delegated to Namespace owners (who can create K8s
 // NetworkPolicies to regulate intra-Namespace traffic)
 reachability := NewReachability(allPods, Dropped)
@@ -3190,11 +3232,11 @@ func testACNPStrictNamespacesIsolation(t *testing.T) {
 // Add a K8s namespaced NetworkPolicy in ns x that isolates all Pods in that namespace.
 builder2 := &NetworkPolicySpecBuilder{}
- builder2 = builder2.SetName(namespaces["x"], "default-deny-in-namespace-x")
+ builder2 = builder2.SetName(getNS("x"), "default-deny-in-namespace-x")
 builder2.SetTypeIngress()
 reachability2 := NewReachability(allPods, Dropped)
 reachability2.ExpectAllSelfNamespace(Connected)
- reachability2.ExpectSelfNamespace(namespaces["x"], Dropped)
+ reachability2.ExpectSelfNamespace(getNS("x"), Dropped)
 reachability2.ExpectSelf(allPods, Connected)
 testStep2 := &TestStep{
 "Namespace isolation with K8s NP, Port 80",
@@ -3240,31 +3282,31 @@ func testFQDNPolicy(t *testing.T) {
 // All client Pods below are randomly chosen from test Namespaces.
 testcases := []podToAddrTestStep{
 {
- Pod(namespaces["x"] + "/a"),
+ Pod(getNS("x") + "/a"),
 "docs.github.com",
 80,
 Rejected,
 },
 {
- Pod(namespaces["x"] + "/b"),
+ Pod(getNS("x") + "/b"),
 "api.github.com",
 80,
 Rejected,
 },
 {
- Pod(namespaces["y"] + "/a"),
+ Pod(getNS("y") + "/a"),
 "wayfair.com",
 80,
 Dropped,
 },
 {
- Pod(namespaces["y"] + "/b"),
+ Pod(getNS("y") + "/b"),
 "stackoverflow.com",
 80,
 Dropped,
 },
 {
- Pod(namespaces["z"] + "/a"),
+ Pod(getNS("z") + "/a"),
 "facebook.com",
 80,
 Connected,
@@ -3292,7 +3334,7 @@ func testFQDNPolicy(t *testing.T) {
 // policies, to avoid having a dependency on external connectivity. The reason we
 // use headless Service is that FQDN will use the IP from DNS A/AAAA records to
 // implement flows in the egress policy table. For a non-headless Service, the DNS
 // name resolves to the ClusterIP for the Service. But when traffic arrives to the
 // egress table, the dstIP has already been DNATed to the Endpoints IP by
 // AntreaProxy Service Load-Balancing, and the policies are not enforced correctly.
// For a headless Service, the Endpoints IP will be directly returned by the DNS
@@ -3303,13 +3345,13 @@ func testFQDNPolicyInClusterService(t *testing.T) {
 defer log.SetLevel(logLevel)
 var services []*v1.Service
 if clusterInfo.podV4NetworkCIDR != "" {
- ipv4Svc := k8sUtils.BuildService("ipv4-svc", namespaces["x"], 80, 80, map[string]string{"pod": "a"}, nil)
+ ipv4Svc := k8sUtils.BuildService("ipv4-svc", getNS("x"), 80, 80, map[string]string{"pod": "a"}, nil)
 ipv4Svc.Spec.ClusterIP = "None"
 ipv4Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol}
 services = append(services, ipv4Svc)
 }
 if clusterInfo.podV6NetworkCIDR != "" {
- ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "b"}, nil)
+ ipv6Svc := k8sUtils.BuildService("ipv6-svc", getNS("x"), 80, 80, map[string]string{"pod": "b"}, nil)
 ipv6Svc.Spec.ClusterIP = "None"
 ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol}
 services = append(services, ipv6Svc)
 }
@@ -3329,8 +3371,8 @@ func testFQDNPolicyInClusterService(t *testing.T) {
 SetTier("application").
 SetPriority(1.0)
 for idx, service := range services {
- builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}, PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionReject)
- builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}, PodSelector: map[string]string{"pod": "c"}}}, crdv1beta1.RuleActionDrop)
+ builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("y")}, PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionReject)
+ builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("z")}, PodSelector: map[string]string{"pod": "c"}}}, crdv1beta1.RuleActionDrop)
 }
 acnp := builder.Get()
 k8sUtils.CreateOrUpdateACNP(acnp)
@@ -3340,8 +3382,8 @@ func testFQDNPolicyInClusterService(t *testing.T) {
 for _, service := range services {
 eachServiceCases := []podToAddrTestStep{
 {
- Pod(namespaces["y"] + "/b"),
- // To indicate the server name is a FQDN, end it with a dot. Then DNS resolver won't attempt to append
+ Pod(getNS("y") + "/b"),
+ // To indicate the server name is a FQDN, end it with a dot. Then DNS resolver won't attempt to append
 // domain names (e.g. svc.cluster.local, cluster.local) when resolving it, making it get resolution
 // result more quickly.
svcDNSName(service) + ".", @@ -3349,13 +3391,13 @@ func testFQDNPolicyInClusterService(t *testing.T) { Rejected, }, { - Pod(namespaces["z"] + "/c"), + Pod(getNS("z") + "/c"), svcDNSName(service) + ".", 80, Dropped, }, { - Pod(namespaces["x"] + "/c"), + Pod(getNS("x") + "/c"), svcDNSName(service) + ".", 80, Connected, @@ -3398,7 +3440,7 @@ func testFQDNPolicyTCP(t *testing.T) { builder.AddFQDNRule("github.com", ProtocolTCP, nil, nil, nil, "", nil, crdv1beta1.RuleActionDrop) testcases := []podToAddrTestStep{ { - Pod(namespaces["y"] + "/a"), + getPod("y", "a"), "github.com", 80, Dropped, @@ -3431,12 +3473,12 @@ func testToServices(t *testing.T, data *TestData) { skipIfProxyDisabled(t, data) var services []*v1.Service if clusterInfo.podV4NetworkCIDR != "" { - ipv4Svc := k8sUtils.BuildService("ipv4-svc", namespaces["x"], 81, 81, map[string]string{"pod": "a"}, nil) + ipv4Svc := k8sUtils.BuildService("ipv4-svc", getNS("x"), 81, 81, map[string]string{"pod": "a"}, nil) ipv4Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol} services = append(services, ipv4Svc) } if clusterInfo.podV6NetworkCIDR != "" { - ipv6Svc := k8sUtils.BuildService("ipv6-svc", namespaces["x"], 80, 80, map[string]string{"pod": "a"}, nil) + ipv6Svc := k8sUtils.BuildService("ipv6-svc", getNS("x"), 80, 80, map[string]string{"pod": "a"}, nil) ipv6Svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv6Protocol} services = append(services, ipv6Svc) } @@ -3457,8 +3499,8 @@ func testToServices(t *testing.T, data *TestData) { builder = builder.SetName("test-acnp-to-services"). SetTier("application"). SetPriority(1.0) - builder.AddToServicesRule(svcRefs, "x-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionDrop) - builder.AddToServicesRule(svcRefs, "y-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["y"]}}}, crdv1beta1.RuleActionDrop) + builder.AddToServicesRule(svcRefs, "x-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop) + builder.AddToServicesRule(svcRefs, "y-to-svc", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("y")}}}, crdv1beta1.RuleActionDrop) time.Sleep(networkPolicyDelay) acnp := builder.Get() @@ -3469,19 +3511,19 @@ func testToServices(t *testing.T, data *TestData) { for _, service := range builtSvcs { eachServiceCases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + getPod("x", "a"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Dropped, }, { - Pod(namespaces["y"] + "/b"), + getPod("y", "b"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Dropped, }, { - Pod(namespaces["z"] + "/c"), + Pod(getNS("z") + "/c"), service.Spec.ClusterIP, service.Spec.Ports[0].Port, Connected, @@ -3509,21 +3551,21 @@ func testToServices(t *testing.T, data *TestData) { } func testServiceAccountSelector(t *testing.T, data *TestData) { - k8sUtils.CreateOrUpdateServiceAccount(k8sUtils.BuildServiceAccount("test-sa", namespaces["x"], nil)) - defer k8sUtils.DeleteServiceAccount(namespaces["x"], "test-sa") + k8sUtils.CreateOrUpdateServiceAccount(k8sUtils.BuildServiceAccount("test-sa", getNS("x"), nil)) + defer k8sUtils.DeleteServiceAccount(getNS("x"), "test-sa") serverName, serverIP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server", controlPlaneNodeName(), data.testNamespace, false) defer cleanupFunc() - client0Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), 
namespaces["x"], false, "test-sa") + client0Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), getNS("x"), false, "test-sa") defer cleanupFunc() - client1Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), namespaces["x"], false, "default") + client1Name, _, cleanupFunc := createAndWaitForPodWithServiceAccount(t, data, data.createAgnhostPodWithSAOnNode, "client", controlPlaneNodeName(), getNS("x"), false, "default") defer cleanupFunc() sa := &crdv1beta1.NamespacedName{ Name: "test-sa", - Namespace: namespaces["x"], + Namespace: getNS("x"), } builder := &ClusterNetworkPolicySpecBuilder{} @@ -3531,7 +3573,7 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": serverName}}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", sa) + nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", sa) acnp := builder.Get() _, err := k8sUtils.CreateOrUpdateACNP(acnp) @@ -3544,13 +3586,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/" + client0Name), + getPod("x", client0Name), serverIP.IPv4.String(), 80, Dropped, }, { - Pod(namespaces["x"] + "/" + client1Name), + getPod("x", client1Name), serverIP.IPv4.String(), 80, Connected, @@ -3562,13 +3604,13 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/" + client0Name), + getPod("x", client0Name), serverIP.IPv6.String(), 80, Dropped, }, { - Pod(namespaces["x"] + "/" + client1Name), + getPod("x", client1Name), serverIP.IPv6.String(), 80, Connected, @@ -3597,20 +3639,20 @@ func testACNPNodeSelectorEgress(t *testing.T) { SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p6443, "egress-control-plane-drop", - []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}, PodSelector: map[string]string{"pod": "a"}}}, + []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}, PodSelector: map[string]string{"pod": "a"}}}, crdv1beta1.RuleActionDrop, true) var testcases []podToAddrTestStep if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + Pod(getNS("x") + "/a"), controlPlaneNodeIPv4(), 6443, Dropped, }, { - Pod(namespaces["x"] + "/b"), + Pod(getNS("x") + "/b"), controlPlaneNodeIPv4(), 6443, Connected, @@ -3622,13 +3664,13 @@ func testACNPNodeSelectorEgress(t *testing.T) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - Pod(namespaces["x"] + "/a"), + Pod(getNS("x") + "/a"), controlPlaneNodeIPv6(), 6443, Dropped, }, { - Pod(namespaces["x"] + "/b"), + Pod(getNS("x") + "/b"), controlPlaneNodeIPv6(), 6443, Connected, @@ -3655,16 +3697,16 @@ func testACNPNodeSelectorEgress(t *testing.T) { } func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { - _, serverIP0, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(1), namespaces["x"], 
false) + _, serverIP0, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(1), getNS("x"), false) defer cleanupFunc() - _, serverIP1, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), namespaces["y"], false) + _, serverIP1, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", nodeName(1), getNS("y"), false) defer cleanupFunc() clientName := "agnhost-client" - require.NoError(t, data.createAgnhostPodOnNode(clientName, namespaces["z"], controlPlaneNodeName(), true)) - defer data.DeletePodAndWait(defaultTimeout, clientName, namespaces["z"]) - _, err := data.podWaitForIPs(defaultTimeout, clientName, namespaces["z"]) + require.NoError(t, data.createAgnhostPodOnNode(clientName, getNS("z"), controlPlaneNodeName(), true)) + defer data.DeletePodAndWait(defaultTimeout, clientName, getNS("z")) + _, err := data.podWaitForIPs(defaultTimeout, clientName, getNS("z")) require.NoError(t, err) builder := &ClusterNetworkPolicySpecBuilder{} @@ -3672,20 +3714,20 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p80, "ingress-control-plane-drop", - []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, + []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop, false) testcases := []podToAddrTestStep{} if clusterInfo.podV4NetworkCIDR != "" { ipv4TestCases := []podToAddrTestStep{ { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP0.IPv4.String(), 80, Dropped, }, { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP1.IPv4.String(), 80, Connected, @@ -3696,13 +3738,13 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { if clusterInfo.podV6NetworkCIDR != "" { ipv6TestCases := []podToAddrTestStep{ { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP0.IPv6.String(), 80, Dropped, }, { - Pod(namespaces["z"] + "/" + clientName), + getPod("z", clientName), serverIP1.IPv6.String(), 80, Connected, @@ -3745,9 +3787,9 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { builder = builder.SetName("test-acnp-icmp"). 
SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, nil, nil, map[string]string{"antrea-e2e": server0Name}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) testcases := []podToAddrTestStep{} if clusterInfo.podV4NetworkCIDR != "" { @@ -3848,7 +3890,7 @@ func testACNPNodePortServiceSupport(t *testing.T, data *TestData, serverNamespac }, }) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) acnp, err := k8sUtils.CreateOrUpdateACNP(builder.Get()) failOnError(err, t) @@ -3940,7 +3982,7 @@ func testACNPIGMPQuery(t *testing.T, data *TestData, acnpName, caseName, groupAd // create acnp with ingress rule for IGMP query igmpType := crdv1beta1.IGMPQuery builder.AddIngress(ProtocolIGMP, nil, nil, nil, nil, nil, &igmpType, &queryGroupAddress, nil, nil, nil, nil, - nil, nil, nil, false, nil, action, "", "", nil) + nil, nil, nil, nil, nil, action, "", "", nil) acnp := builder.Get() _, err = k8sUtils.CreateOrUpdateACNP(acnp) defer data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Delete(context.TODO(), acnp.Name, metav1.DeleteOptions{}) @@ -4021,7 +4063,7 @@ func testACNPMulticastEgress(t *testing.T, data *TestData, acnpName, caseName, g SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": label}}}) cidr := mc.group.String() + "/32" builder.AddEgress(ProtocolUDP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil, - nil, nil, nil, false, nil, action, "", "", nil) + nil, nil, nil, nil, nil, action, "", "", nil) acnp := builder.Get() _, err = k8sUtils.CreateOrUpdateACNP(acnp) if err != nil { @@ -4315,7 +4357,7 @@ func waitForResourcesReady(t *testing.T, timeout time.Duration, objs ...metav1.O } // TestAntreaPolicy is the top-level test which contains all subtests for -// AntreaPolicy related test cases so they can share setup, teardown. +// AntreaPolicy related test cases so that they can share setup and teardown. func TestAntreaPolicy(t *testing.T) { skipIfHasWindowsNodes(t) skipIfAntreaPolicyDisabled(t) @@ -4326,7 +4368,7 @@ func TestAntreaPolicy(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, formFactorNormal) // This test group only provides one case for each CR, including ACNP, ANNP, Tier, // ClusterGroup and Group to make sure the corresponding validation webhooks is @@ -4476,7 +4518,7 @@ func TestAntreaPolicyStatus(t *testing.T) { annpBuilder = annpBuilder.SetName(data.testNamespace, "annp-applied-to-two-nodes"). SetPriority(1.0). 
SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "") annp := annpBuilder.Get() log.Debugf("creating ANNP %v", annp.Name) @@ -4488,8 +4530,8 @@ func TestAntreaPolicyStatus(t *testing.T) { acnpBuilder = acnpBuilder.SetName("acnp-applied-to-two-nodes"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) acnp := acnpBuilder.Get() log.Debugf("creating ACNP %v", acnp.Name) _, err = data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Create(context.TODO(), acnp, metav1.CreateOptions{}) @@ -4525,9 +4567,9 @@ func TestAntreaPolicyStatusWithAppliedToPerRule(t *testing.T) { annpBuilder := &AntreaNetworkPolicySpecBuilder{} annpBuilder = annpBuilder.SetName(data.testNamespace, "annp-applied-to-per-rule"). SetPriority(1.0) - annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server0Name}}}, crdv1beta1.RuleActionAllow, "", "") - annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, nil, + annpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server1Name}}}, crdv1beta1.RuleActionAllow, "", "") annp := annpBuilder.Get() log.Debugf("creating ANNP %v", annp.Name) @@ -4592,15 +4634,15 @@ func TestAntreaPolicyStatusWithAppliedToUnsupportedGroup(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, formFactorNormal) - testNamespace := namespaces["x"] + testNamespace := getNS("x") // Build a Group with namespaceSelector selecting namespaces outside testNamespace. grpName := "grp-with-ns-selector" grpBuilder := &GroupSpecBuilder{} grpBuilder = grpBuilder.SetName(grpName).SetNamespace(testNamespace). SetPodSelector(map[string]string{"pod": "b"}, nil). 
- SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil)
+ SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil)
 grp, err := k8sUtils.CreateOrUpdateGroup(grpBuilder.Get())
 failOnError(err, t)
 failOnError(waitForResourceReady(t, timeout, grp), t)
@@ -4711,7 +4753,28 @@ func (data *TestData) waitForACNPRealized(t *testing.T, name string, timeout tim
 return nil
 }

-// testANNPNetworkPolicyStatsWithDropAction tests antreanetworkpolicystats can correctly collect dropped packets stats from ANNP if
+// TestAntreaPolicyStats is the top-level test which contains all subtests for
+// AntreaPolicyStats related test cases so that they can share setup and teardown.
+func TestAntreaPolicyStats(t *testing.T) {
+ skipIfHasWindowsNodes(t)
+ skipIfAntreaPolicyDisabled(t)
+ skipIfNetworkPolicyStatsDisabled(t)
+
+ data, err := setupTest(t)
+ if err != nil {
+ t.Fatalf("Error when setting up test: %v", err)
+ }
+ defer teardownTest(t, data)
+
+ t.Run("testANNPNetworkPolicyStatsWithDropAction", func(t *testing.T) {
+ testANNPNetworkPolicyStatsWithDropAction(t, data)
+ })
+ t.Run("testAntreaClusterNetworkPolicyStats", func(t *testing.T) {
+ testAntreaClusterNetworkPolicyStats(t, data)
+ })
+}
+
+// testANNPNetworkPolicyStatsWithDropAction tests that antreanetworkpolicystats can correctly collect dropped packet stats from ANNP if
 // networkpolicystats feature is enabled
 func testANNPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) {
 serverName, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", "", data.testNamespace, false)
diff --git a/test/e2e/clustergroup_test.go b/test/e2e/clustergroup_test.go
index 17267ba788d..9308cd7b32f 100644
--- a/test/e2e/clustergroup_test.go
+++ b/test/e2e/clustergroup_test.go
@@ -49,7 +49,7 @@ func testInvalidCGIPBlockWithPodSelector(t *testing.T) {
 func testInvalidCGIPBlockWithNSSelector(t *testing.T) {
 invalidErr := fmt.Errorf("clustergroup created with ipblock and namespaceSelector")
 cgName := "ipb-ns"
- nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}}
+ nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}}
 cidr := "10.0.0.10/32"
 ipb := []crdv1beta1.IPBlock{{CIDR: cidr}}
 cg := &crdv1beta1.ClusterGroup{
@@ -72,7 +72,7 @@ func testInvalidCGServiceRefWithPodSelector(t *testing.T) {
 cgName := "svcref-pod-selector"
 pSel := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "x"}}
 svcRef := &crdv1beta1.NamespacedName{
- Namespace: namespaces["y"],
+ Namespace: getNS("y"),
 Name: "test-svc",
 }
 cg := &crdv1beta1.ClusterGroup{
@@ -93,9 +93,9 @@ func testInvalidCGServiceRefWithPodSelector(t *testing.T) {
 func testInvalidCGServiceRefWithNSSelector(t *testing.T) {
 invalidErr := fmt.Errorf("clustergroup created with serviceReference and namespaceSelector")
 cgName := "svcref-ns-selector"
- nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}}
+ nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}}
 svcRef := &crdv1beta1.NamespacedName{
- Namespace: namespaces["y"],
+ Namespace: getNS("y"),
 Name: "test-svc",
 }
 cg := &crdv1beta1.ClusterGroup{
@@ -119,7 +119,7 @@ func testInvalidCGServiceRefWithIPBlock(t *testing.T) {
 cidr := "10.0.0.10/32"
 ipb := []crdv1beta1.IPBlock{{CIDR: cidr}}
 svcRef := &crdv1beta1.NamespacedName{
- Namespace: namespaces["y"],
+ Namespace: getNS("y"),
 Name: "test-svc",
 }
 cg := &crdv1beta1.ClusterGroup{
@@ -182,7 +182,7 @@ func testInvalidCGChildGroupWithServiceReference(t *testing.T)
invalidErr := fmt.Errorf("clustergroup created with childGroups and ServiceReference") cgName := "child-group-svcref" svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } cg := &crdv1beta1.ClusterGroup{ @@ -320,7 +320,7 @@ func TestClusterGroup(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, formFactorNormal) t.Run("TestGroupClusterGroupValidate", func(t *testing.T) { t.Run("Case=IPBlockWithPodSelectorDenied", func(t *testing.T) { testInvalidCGIPBlockWithPodSelector(t) }) diff --git a/test/e2e/group_test.go b/test/e2e/group_test.go index a35651c7b8f..487d647047d 100644 --- a/test/e2e/group_test.go +++ b/test/e2e/group_test.go @@ -32,7 +32,7 @@ func testInvalidGroupIPBlockWithPodSelector(t *testing.T) { g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["x"], + Namespace: getNS("x"), }, Spec: crdv1beta1.GroupSpec{ PodSelector: pSel, @@ -48,13 +48,13 @@ func testInvalidGroupIPBlockWithPodSelector(t *testing.T) { func testInvalidGroupIPBlockWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("group created with ipblock and namespaceSelector") gName := "ipb-ns" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}} cidr := "10.0.0.10/32" ipb := []crdv1beta1.IPBlock{{CIDR: cidr}} g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["x"], + Namespace: getNS("x"), }, Spec: crdv1beta1.GroupSpec{ NamespaceSelector: nSel, @@ -72,13 +72,13 @@ func testInvalidGroupServiceRefWithPodSelector(t *testing.T) { gName := "svcref-pod-selector" pSel := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "x"}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["y"], + Namespace: getNS("y"), }, Spec: crdv1beta1.GroupSpec{ PodSelector: pSel, @@ -94,15 +94,15 @@ func testInvalidGroupServiceRefWithPodSelector(t *testing.T) { func testInvalidGroupServiceRefWithNSSelector(t *testing.T) { invalidErr := fmt.Errorf("group created with serviceReference and namespaceSelector") gName := "svcref-ns-selector" - nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespaces["y"]}} + nSel := &metav1.LabelSelector{MatchLabels: map[string]string{"ns": getNS("y")}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["y"], + Namespace: getNS("y"), }, Spec: crdv1beta1.GroupSpec{ NamespaceSelector: nSel, @@ -121,13 +121,13 @@ func testInvalidGroupServiceRefWithIPBlock(t *testing.T) { cidr := "10.0.0.10/32" ipb := []crdv1beta1.IPBlock{{CIDR: cidr}} svcRef := &crdv1beta1.NamespacedName{ - Namespace: namespaces["y"], + Namespace: getNS("y"), Name: "test-svc", } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces["y"], + Namespace: getNS("y"), }, Spec: crdv1beta1.GroupSpec{ ServiceReference: svcRef, @@ -149,7 +149,7 @@ func createChildGroupForTest(t *testing.T) { g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: testChildGroupName, - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), }, Spec: crdv1beta1.GroupSpec{ PodSelector: 
&metav1.LabelSelector{}, @@ -161,7 +161,7 @@ func createChildGroupForTest(t *testing.T) { } func cleanupChildGroupForTest(t *testing.T) { - if err := k8sUtils.DeleteGroup(namespaces[testChildGroupNamespace], testChildGroupName); err != nil { + if err := k8sUtils.DeleteGroup(getNS(testChildGroupNamespace), testChildGroupName); err != nil { failOnError(err, t) } } @@ -173,7 +173,7 @@ func testInvalidGroupChildGroupWithPodSelector(t *testing.T) { g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), }, Spec: crdv1beta1.GroupSpec{ PodSelector: pSel, @@ -191,12 +191,12 @@ func testInvalidGroupChildGroupWithServiceReference(t *testing.T) { gName := "child-group-svcref" svcRef := &crdv1beta1.NamespacedName{ Name: "test-svc", - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), } g := &crdv1beta1.Group{ ObjectMeta: metav1.ObjectMeta{ Name: gName, - Namespace: namespaces[testChildGroupNamespace], + Namespace: getNS(testChildGroupNamespace), }, Spec: crdv1beta1.GroupSpec{ ServiceReference: svcRef, @@ -213,13 +213,13 @@ func testInvalidGroupMaxNestedLevel(t *testing.T) { invalidErr := fmt.Errorf("group created with childGroup which has childGroups itself") gName1, gName2 := "g-nested-1", "g-nested-2" g1 := &crdv1beta1.Group{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespaces[testChildGroupNamespace], Name: gName1}, + ObjectMeta: metav1.ObjectMeta{Namespace: getNS(testChildGroupNamespace), Name: gName1}, Spec: crdv1beta1.GroupSpec{ ChildGroups: []crdv1beta1.ClusterGroupReference{crdv1beta1.ClusterGroupReference(testChildGroupName)}, }, } g2 := &crdv1beta1.Group{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespaces[testChildGroupNamespace], Name: gName2}, + ObjectMeta: metav1.ObjectMeta{Namespace: getNS(testChildGroupNamespace), Name: gName2}, Spec: crdv1beta1.GroupSpec{ ChildGroups: []crdv1beta1.ClusterGroupReference{crdv1beta1.ClusterGroupReference(gName1)}, }, @@ -235,7 +235,7 @@ func testInvalidGroupMaxNestedLevel(t *testing.T) { failOnError(invalidErr, t) } // cleanup g-nested-1 - if err := k8sUtils.DeleteGroup(namespaces[testChildGroupNamespace], gName1); err != nil { + if err := k8sUtils.DeleteGroup(getNS(testChildGroupNamespace), gName1); err != nil { failOnError(err, t) } // Try to create g-nested-2 first and then g-nested-1. 
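For reference, the invalid nesting that these cases exercise looks like the sketch below (illustrative YAML only; the Namespace and child group names stand in for the testChildGroupNamespace and testChildGroupName constants defined elsewhere). Groups may only be nested one level deep, so a Group whose child itself has childGroups is rejected:

    apiVersion: crd.antrea.io/v1beta1
    kind: Group
    metadata:
      name: g-nested-1
      namespace: test-ns              # placeholder for the test Namespace
    spec:
      childGroups: ["child-grp"]      # allowed: the child selects Pods directly
    ---
    apiVersion: crd.antrea.io/v1beta1
    kind: Group
    metadata:
      name: g-nested-2
      namespace: test-ns
    spec:
      childGroups: ["g-nested-1"]     # rejected: g-nested-1 already has childGroups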
@@ -249,7 +249,7 @@ func testInvalidGroupMaxNestedLevel(t *testing.T) { failOnError(invalidErr, t) } // cleanup g-nested-2 - if err := k8sUtils.DeleteGroup(namespaces[testChildGroupNamespace], gName2); err != nil { + if err := k8sUtils.DeleteGroup(getNS(testChildGroupNamespace), gName2); err != nil { failOnError(err, t) } } @@ -263,7 +263,7 @@ func TestGroup(t *testing.T) { t.Fatalf("Error when setting up test: %v", err) } defer teardownTest(t, data) - initialize(t, data) + initialize(t, data, formFactorNormal) t.Run("TestGroupNamespacedGroupValidate", func(t *testing.T) { t.Run("Case=IPBlockWithPodSelectorDenied", func(t *testing.T) { testInvalidGroupIPBlockWithPodSelector(t) }) diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index ef0720afc6d..a300ed6d2aa 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -38,6 +38,8 @@ import ( "antrea.io/antrea/test/e2e/utils" ) +var ErrPodNotFound = errors.New("pod not found") + type KubernetesUtils struct { *TestData podCache map[string][]v1.Pod @@ -89,7 +91,11 @@ type probeResult struct { err error } -var ErrPodNotFound = errors.New("Pod not found") +// TestNamespaceMeta holds the relevant metadata of a test Namespace during initialization. +type TestNamespaceMeta struct { + Name string + Labels map[string]string +} // GetPodByLabel returns a Pod with the matching Namespace and "pod" label if it's found. // If the pod is not found, GetPodByLabel returns "ErrPodNotFound". @@ -736,9 +742,9 @@ func (data *TestData) DeleteNetworkPolicy(ns, name string) error { } // CleanNetworkPolicies is a convenience function for deleting NetworkPolicies in the provided namespaces. -func (data *TestData) CleanNetworkPolicies(namespaces map[string]string) error { +func (data *TestData) CleanNetworkPolicies(namespaces map[string]TestNamespaceMeta) error { for _, ns := range namespaces { - if err := data.clientset.NetworkingV1().NetworkPolicies(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { + if err := data.clientset.NetworkingV1().NetworkPolicies(ns.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { return fmt.Errorf("unable to delete NetworkPolicies in Namespace '%s': %w", ns, err) } } @@ -832,6 +838,35 @@ func (k *KubernetesUtils) GetCG(name string) (*crdv1beta1.ClusterGroup, error) { return k.crdClient.CrdV1beta1().ClusterGroups().Get(context.TODO(), name, metav1.GetOptions{}) } +// CreateGroup is a convenience function for creating an Antrea Group by namespace, name and selector. 
+func (k *KubernetesUtils) CreateGroup(namespace, name string, pSelector, nSelector *metav1.LabelSelector, ipBlocks []crdv1beta1.IPBlock) (*crdv1beta1.Group, error) {
+ log.Infof("Creating group %s/%s", namespace, name)
+ _, err := k.crdClient.CrdV1beta1().Groups(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+ if err != nil {
+ g := &crdv1beta1.Group{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ if pSelector != nil {
+ g.Spec.PodSelector = pSelector
+ }
+ if nSelector != nil {
+ g.Spec.NamespaceSelector = nSelector
+ }
+ if len(ipBlocks) > 0 {
+ g.Spec.IPBlocks = ipBlocks
+ }
+ g, err = k.crdClient.CrdV1beta1().Groups(namespace).Create(context.TODO(), g, metav1.CreateOptions{})
+ if err != nil {
+ log.Debugf("Unable to create group %s/%s: %s", namespace, name, err)
+ }
+ return g, err
+ }
+ return nil, fmt.Errorf("group with name %s/%s already exists", namespace, name)
+}
+
 // GetGroup is a convenience function for getting Groups
 func (k *KubernetesUtils) GetGroup(namespace, name string) (*crdv1beta1.Group, error) {
 return k.crdClient.CrdV1beta1().Groups(namespace).Get(context.TODO(), name, metav1.GetOptions{})
@@ -1101,10 +1136,15 @@ func (k *KubernetesUtils) ValidateRemoteCluster(remoteCluster *KubernetesUtils,
 }
 }

-func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) {
+func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, pods []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) {
 for key, ns := range namespaces {
 if createNamespaces {
- _, err := k.CreateOrUpdateNamespace(ns, map[string]string{"ns": ns})
+ if ns.Labels == nil {
+ ns.Labels = make(map[string]string)
+ }
+ // convenience label for testing
+ ns.Labels["ns"] = ns.Name
+ _, err := k.CreateOrUpdateNamespace(ns.Name, ns.Labels)
 if err != nil {
 return nil, fmt.Errorf("unable to create/update ns %s: %w", ns, err)
 }
@@ -1119,8 +1159,8 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string,
 }
 for _, pod := range pods {
 log.Infof("Creating/updating Pod '%s/%s'", ns, pod)
- deployment := ns + pod
- _, err := k.CreateOrUpdateDeployment(ns, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork)
+ deployment := ns.Name + pod
+ _, err := k.CreateOrUpdateDeployment(ns.Name, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork)
 if err != nil {
 return nil, fmt.Errorf("unable to create/update Deployment '%s/%s': %w", ns, pod, err)
 }
@@ -1130,7 +1170,7 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string,
 podIPs := make(map[string][]string, len(pods)*len(namespaces))
 for _, podName := range pods {
 for _, ns := range namespaces {
- allPods = append(allPods, NewPod(ns, podName))
+ allPods = append(allPods, NewPod(ns.Name, podName))
 }
 }
 for _, pod := range allPods {
@@ -1150,7 +1190,7 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string,
 return podIPs, nil
 }

-func (k *KubernetesUtils) Cleanup(namespaces map[string]string) {
+func (k *KubernetesUtils) Cleanup(namespaces map[string]TestNamespaceMeta) {
 // Cleanup any cluster-scoped resources.
if err := k.CleanACNPs(); err != nil { log.Errorf("Error when cleaning up ACNPs: %v", err) @@ -1161,7 +1201,7 @@ func (k *KubernetesUtils) Cleanup(namespaces map[string]string) { for _, ns := range namespaces { log.Infof("Deleting test Namespace %s", ns) - if err := k.DeleteNamespace(ns, defaultTimeout); err != nil { + if err := k.DeleteNamespace(ns.Name, defaultTimeout); err != nil { log.Errorf("Error when deleting Namespace '%s': %v", ns, err) } } diff --git a/test/e2e/utils/cnp_spec_builder.go b/test/e2e/utils/cnp_spec_builder.go index 0c6acdc7585..708826754aa 100644 --- a/test/e2e/utils/cnp_spec_builder.go +++ b/test/e2e/utils/cnp_spec_builder.go @@ -130,15 +130,13 @@ func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[strin func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType, icmpCode, igmpType *int32, groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string, - podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, + podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, namespaces *crdv1beta1.PeerNamespaces, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder { var podSel *metav1.LabelSelector var nodeSel *metav1.LabelSelector var nsSel *metav1.LabelSelector - var ns *crdv1beta1.PeerNamespaces var appliedTos []crdv1beta1.AppliedTo - matchSelf := crdv1beta1.NamespaceMatchSelf if b.Spec.Ingress == nil { b.Spec.Ingress = []crdv1beta1.Rule{} @@ -162,11 +160,6 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol MatchExpressions: nsSelectorMatchExp, } } - if selfNS == true { - ns = &crdv1beta1.PeerNamespaces{ - Match: matchSelf, - } - } var ipBlock *crdv1beta1.IPBlock if cidr != nil { ipBlock = &crdv1beta1.IPBlock{ @@ -185,12 +178,12 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol } // An empty From/To in ACNP rules evaluates to match all addresses. 
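With the selfNS flag replaced by an explicit *crdv1beta1.PeerNamespaces argument, callers of the builder now construct the peer Namespace matcher themselves. A minimal sketch of the two call patterns against the new signature (builder and p80 are assumed to be a ClusterNetworkPolicySpecBuilder and an int32 port variable, as used elsewhere in these tests; the rule actions are illustrative values only):

    // Match only the Namespace of the appliedTo workloads (the old selfNS=true behavior).
    self := &crdv1beta1.PeerNamespaces{Match: crdv1beta1.NamespaceMatchSelf}
    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
        nil, nil, nil, self, nil, crdv1beta1.RuleActionDrop, "", "", nil)

    // Match Namespaces that carry the same value as the appliedTo Namespace for the "purpose" label key.
    same := &crdv1beta1.PeerNamespaces{SameLabels: []string{"purpose"}}
    builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
        nil, nil, nil, same, nil, crdv1beta1.RuleActionPass, "", "", nil)

Exactly one of Match and SameLabels should be set on a peer; the validation added later in this series rejects a namespaces field with more than one matching criterion.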
policyPeer := make([]crdv1beta1.NetworkPolicyPeer, 0) - if podSel != nil || nodeSel != nil || nsSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil { + if podSel != nil || nodeSel != nil || nsSel != nil || namespaces != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil { policyPeer = []crdv1beta1.NetworkPolicyPeer{{ PodSelector: podSel, NodeSelector: nodeSel, NamespaceSelector: nsSel, - Namespaces: ns, + Namespaces: namespaces, IPBlock: ipBlock, Group: ruleClusterGroup, ServiceAccount: serviceAccount, @@ -297,14 +290,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType, icmpCode, igmpType *int32, groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string, - podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, + podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, namespaces *crdv1beta1.PeerNamespaces, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder { // For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical // With the exception of calling the rule `To` vs. `From`. c := &ClusterNetworkPolicySpecBuilder{} c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, igmpType, groupAddress, cidr, podSelector, nodeSelector, nsSelector, - podSelectorMatchExp, nodeSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount) + podSelectorMatchExp, nodeSelectorMatchExp, nsSelectorMatchExp, namespaces, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount) theRule := c.Get().Spec.Ingress[0] b.Spec.Egress = append(b.Spec.Egress, crdv1beta1.Rule{ From 5d6985f0b77ff51a7e92ecf1c3758f3814faf1f6 Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Mon, 9 Jan 2023 15:33:09 -0800 Subject: [PATCH 3/7] Add same-labels e2e testcase Signed-off-by: Dyanngg --- .../antrea-multicluster-leader-global.yml | 96 ++++++++++++--- ...cluster.crd.antrea.io_resourceexports.yaml | 48 ++++++-- ...cluster.crd.antrea.io_resourceimports.yaml | 48 ++++++-- .../networkpolicy/clusternetworkpolicy.go | 2 +- pkg/controller/networkpolicy/validate.go | 3 + test/e2e/antreapolicy_test.go | 113 +++++++++++++++++- test/e2e/reachability.go | 20 ++++ 7 files changed, 293 insertions(+), 37 deletions(-) diff --git a/multicluster/build/yamls/antrea-multicluster-leader-global.yml b/multicluster/build/yamls/antrea-multicluster-leader-global.yml index ca5fcd07e5b..7cb1e1ab4a1 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader-global.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader-global.yml @@ -1143,9 +1143,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. 
type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1550,9 +1558,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2107,9 +2123,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2514,9 +2538,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4054,9 +4086,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4461,9 +4501,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5018,9 +5066,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. 
type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5425,9 +5481,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml index 9140f80d34f..4bd6104aace 100644 --- a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml +++ b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml @@ -733,9 +733,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1140,9 +1148,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1697,9 +1713,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2104,9 +2128,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. 
+ items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml index fafe9bec89f..4c5fe68f0a6 100644 --- a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml +++ b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml @@ -731,9 +731,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1138,9 +1146,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1695,9 +1711,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2102,9 +2126,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. 
+ items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go index 8bb78d97d5e..09c61bf26c2 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go @@ -117,7 +117,7 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels peerNamespacesSelectorExists := func(peers []crdv1beta1.NetworkPolicyPeer) bool { for _, peer := range peers { - if peer.Namespaces != nil && peer.Namespaces.Match == crdv1beta1.NamespaceMatchSelf { + if peer.Namespaces != nil { return true } } diff --git a/pkg/controller/networkpolicy/validate.go b/pkg/controller/networkpolicy/validate.go index 5cd0e01071c..c03919eb0ee 100644 --- a/pkg/controller/networkpolicy/validate.go +++ b/pkg/controller/networkpolicy/validate.go @@ -656,6 +656,9 @@ func (v *antreaPolicyValidator) validatePeers(ingress, egress []crdv1beta1.Rule) if peer.NamespaceSelector != nil && peer.Namespaces != nil { return "namespaces and namespaceSelector cannot be set at the same time for a single NetworkPolicyPeer", false } + if peer.Namespaces != nil && numFieldsSetInStruct(*peer.Namespaces) > 1 { + return "only one matching criteria can be specified in a single peer namespaces field", false + } peerFieldsNum := numFieldsSetInStruct(peer) if peer.Group != "" && peerFieldsNum > 1 { return "group cannot be set with other peers in rules", false diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index dd28e6cdc02..eb6cb352aea 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -137,10 +137,10 @@ func initNamespaceMeta(formFactor string) map[string]TestNamespaceMeta { } allNamespaceMeta["dev"+strconv.Itoa(i)] = devNS } - allNamespaceMeta["no-tier-label"] = TestNamespaceMeta{ - Name: "no-tier-label-" + suffix, + allNamespaceMeta["no-tier"] = TestNamespaceMeta{ + Name: "no-tier-" + suffix, Labels: map[string]string{ - "purpose": "test", + "purpose": "test-exclusion", }, } } else if formFactor == formFactorNormal { @@ -3249,7 +3249,93 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { } testCase := []*TestCase{ - {"ACNP strict Namespace isolation for all namespaces", []*TestStep{testStep1, testStep2}}, + {"ACNP strict Namespace isolation for all Namespaces", []*TestStep{testStep1, testStep2}}, + } + executeTests(t, testCase) +} + +func testACNPStrictNamespacesIsolationByLabels(t *testing.T) { + samePurposeTierLabels := &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"purpose", "tier"}, + } + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("test-acnp-strict-ns-isolation-by-labels"). + SetTier("securityops"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + samePurposeTierLabels, nil, crdv1beta1.RuleActionPass, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + // prod1 and prod2 Namespaces should be able to connect to each other. The same goes for dev1 and + // dev2 Namespaces. However, any prod Namespace should not be able to connect to any dev Namespace + // due to different "tier" label values. 
For the "no-tier" Namespace, the first ingress rule will + // have no effect because the Namespace does not have a "tier" label. So every Pod in that Namespace + // will be isolated according to the second rule of the ACNP. + reachability := NewReachability(allPods, Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod1"), getNS("prod2"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod1"), getNS("prod2"), Connected) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod2"), getNS("prod1"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod2"), getNS("prod1"), Connected) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev1"), getNS("dev2"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev1"), getNS("dev2"), Connected) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev2"), getNS("dev1"), Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev2"), getNS("dev1"), Connected) + reachability.ExpectAllSelfNamespace(Connected) + reachability.ExpectSelfNamespace(getNS("no-tier"), Dropped) + reachability.ExpectSelf(allPods, Connected) + + testStep1 := &TestStep{ + "Namespace isolation by label, Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + testCase := []*TestCase{ + {"ACNP strict Namespace isolation by Namespace purpose and tier labels", []*TestStep{testStep1}}, + } + executeTests(t, testCase) +} + +func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T) { + samePurposeTierLabels := &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"purpose"}, + } + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("test-acnp-strict-ns-isolation-by-single-label"). + SetTier("securityops"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + samePurposeTierLabels, nil, crdv1beta1.RuleActionPass, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + // Namespaces are split into two logical groups, purpose=test (prod1,2 and dev1,2) and purpose=test-exclusion + // (no-tier). The two groups of Namespace should not be able to connect to each other. 
+ reachability := NewReachability(allPods, Connected) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceEgressToNamespace(getNS("prod2"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceEgressToNamespace(getNS("dev2"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("prod2"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev1"), getNS("no-tier"), Dropped) + reachability.ExpectNamespaceIngressFromNamespace(getNS("dev2"), getNS("no-tier"), Dropped) + + testStep1 := &TestStep{ + "Namespace isolation by single label, Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + testCase := []*TestCase{ + {"ACNP strict Namespace isolation by Namespace purpose label", []*TestStep{testStep1}}, } executeTests(t, testCase) } @@ -4499,6 +4585,25 @@ func testMulticastNP(t *testing.T, data *TestData, testNamespace string) { t.Run("Case=MulticastNPPolicyEgressDrop", func(t *testing.T) { testACNPMulticastEgressDrop(t, data, testNamespace) }) } +func TestAntreaPolicyExtendedNamespaces(t *testing.T) { + skipIfHasWindowsNodes(t) + skipIfAntreaPolicyDisabled(t) + + data, err := setupTest(t) + if err != nil { + t.Fatalf("Error when setting up test: %v", err) + } + defer teardownTest(t, data) + + initialize(t, data, formFactorLarge) + + t.Run("TestGroupACNPNamespaceLabelSelections", func(t *testing.T) { + t.Run("Case=ACNPStrictNamespacesIsolationByLabels", func(t *testing.T) { testACNPStrictNamespacesIsolationByLabels(t) }) + t.Run("Case=ACNPStrictNamespacesIsolationBySingleLabel", func(t *testing.T) { testACNPStrictNamespacesIsolationBySingleLabel(t) }) + }) + k8sUtils.Cleanup(namespaces) +} + func TestAntreaPolicyStatus(t *testing.T) { skipIfHasWindowsNodes(t) skipIfAntreaPolicyDisabled(t) diff --git a/test/e2e/reachability.go b/test/e2e/reachability.go index 0bf04418db5..48166588a4c 100644 --- a/test/e2e/reachability.go +++ b/test/e2e/reachability.go @@ -325,6 +325,26 @@ func (r *Reachability) ExpectEgressToNamespace(pod Pod, namespace string, connec } } +func (r *Reachability) ExpectNamespaceIngressFromNamespace(dstNamespace, srcNamespace string, connectivity PodConnectivityMark) { + dstPods, ok := r.PodsByNamespace[dstNamespace] + if !ok { + panic(fmt.Errorf("destination Namespace %s is not found", dstNamespace)) + } + for _, p := range dstPods { + r.ExpectIngressFromNamespace(p, srcNamespace, connectivity) + } +} + +func (r *Reachability) ExpectNamespaceEgressToNamespace(srcNamespace, dstNamespace string, connectivity PodConnectivityMark) { + srcPods, ok := r.PodsByNamespace[srcNamespace] + if !ok { + panic(fmt.Errorf("src Namespace %s is not found", srcNamespace)) + } + for _, p := range srcPods { + r.ExpectEgressToNamespace(p, dstNamespace, connectivity) + } +} + func (r *Reachability) Observe(pod1 Pod, pod2 Pod, connectivity PodConnectivityMark) { r.Observed.Set(string(pod1), string(pod2), connectivity) } From d92ab6aa6f21b33212567733ed90aef20191c894 Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Thu, 26 Jan 2023 14:16:20 -0800 Subject: [PATCH 4/7] Add E2E testcase for Namespace label update Signed-off-by: Dyanngg --- .../networkpolicy/clusternetworkpolicy.go | 2 +- .../networkpolicy/networkpolicy_controller.go | 20 +- 
test/e2e/antreapolicy_test.go | 849 ++++++++----
 test/e2e/k8s_util.go | 16 +-
 4 files changed, 409 insertions(+), 478 deletions(-)

diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go
index 09c61bf26c2..6e5e51cadc1 100644
--- a/pkg/controller/networkpolicy/clusternetworkpolicy.go
+++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go
@@ -125,7 +125,7 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels
 }
 affectedPolicies := sets.New[string]()
- objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(perNamespaceRuleIndex, HasPerNamespaceRule)
+ objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(perNamespaceRuleIndex, hasSuchRule)
 for _, obj := range objs {
 cnp := obj.(*crdv1beta1.ClusterNetworkPolicy)
 if affected := func() bool {
diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go
index af1edbcf773..fc697554ce8 100644
--- a/pkg/controller/networkpolicy/networkpolicy_controller.go
+++ b/pkg/controller/networkpolicy/networkpolicy_controller.go
@@ -93,8 +93,9 @@ const (
 addressGroupType grouping.GroupType = "addressGroup"
 internalGroupType grouping.GroupType = "internalGroup"

- perNamespaceRuleIndex = "hasPerNamespaceRule"
- HasPerNamespaceRule = "true"
+ perNamespaceRuleIndex = "hasPerNamespaceRule"
+ namespaceLabelRuleIndex = "hasNamespaceLabelRule"
+ hasSuchRule = "true"
 )

 var (
@@ -333,9 +334,18 @@ var acnpIndexers = cache.Indexers{
 if !ok {
 return []string{}, nil
 }
- has := hasPerNamespaceRule(acnp)
- if has {
- return []string{HasPerNamespaceRule}, nil
+ if hasPerNSRule := hasPerNamespaceRule(acnp); hasPerNSRule {
+ return []string{hasSuchRule}, nil
+ }
+ return []string{}, nil
+ },
+ namespaceLabelRuleIndex: func(obj interface{}) ([]string, error) {
+ acnp, ok := obj.(*crdv1beta1.ClusterNetworkPolicy)
+ if !ok {
+ return []string{}, nil
+ }
+ if hasNSLabelRule := hasNamespaceLabelRule(acnp); hasNSLabelRule {
+ return []string{hasSuchRule}, nil
 }
 return []string{}, nil
 },
diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go
index eb6cb352aea..e083576273b 100644
--- a/test/e2e/antreapolicy_test.go
+++ b/test/e2e/antreapolicy_test.go
@@ -460,13 +460,11 @@ func testACNPAllowXBtoA(t *testing.T) {
 testStep := []*TestStep{
 {
- "Port 80",
- reachability,
- []metav1.Object{builder.Get()},
- []int32{80},
- ProtocolTCP,
- 0,
- nil,
+ Name: "Port 80",
+ Reachability: reachability,
+ TestResources: []metav1.Object{builder.Get()},
+ Ports: []int32{80},
+ Protocol: ProtocolTCP,
 },
 }
 testCase := []*TestCase{
@@ -520,6 +518,8 @@ func testACNPSourcePort(t *testing.T) {
 ProtocolTCP,
 0,
 nil,
+ nil,
+ nil,
 },
 {
 "Port 81",
 ProtocolTCP,
 0,
 nil,
+ nil,
+ nil,
 },
 {
 "Port range 80-81",
 ProtocolTCP,
 0,
 nil,
+ nil,
+ nil,
 },
 }
 testCase := []*TestCase{
@@ -563,13 +567,11 @@ func testACNPAllowXBtoYA(t *testing.T) {
 testStep := []*TestStep{
 {
- "NamedPort 81",
- reachability,
- []metav1.Object{builder.Get()},
- []int32{81},
- ProtocolTCP,
- 0,
- nil,
+ Name: "NamedPort 81",
+ Reachability: reachability,
+ TestResources: []metav1.Object{builder.Get()},
+ Ports: []int32{81},
+ Protocol: ProtocolTCP,
 },
 }
 testCase := []*TestCase{
@@ -608,13 +610,11 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) {
 testStep := []*TestStep{
 {
- "Both ACNP",
- reachabilityBothACNP,
-
[]metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -645,13 +645,11 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol reachability := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 81", - reachability, - []metav1.Object{builder.Get()}, - []int32{81}, - protocol, - 0, - nil, + Name: "Port 81", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{81}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -684,13 +682,11 @@ func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -717,13 +713,11 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) { reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -755,22 +749,18 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { testStep := []*TestStep{ { - "Port 80", - reachability1, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability1, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80", - reachability2, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolUDP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolUDP, }, } testCase := []*TestCase{ @@ -800,14 +790,12 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) { testStep := []*TestStep{ { - "NamedPort 81", - reachability, + Name: "NamedPort 81", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -837,13 +825,11 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) { testStep := []*TestStep{ { - "NamedPort 81", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "NamedPort 81", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -870,14 +856,12 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - 
[]metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -905,14 +889,12 @@ func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) { reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -948,22 +930,18 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { updatedReachability.Expect(getPod("z", "c"), getPod("z", "b"), Dropped) testStep := []*TestStep{ { - "CG Pods A", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "CG Pods A", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "CG Pods C - update", - updatedReachability, - []metav1.Object{updatedCgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "CG Pods C - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedCgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -999,22 +977,18 @@ func testACNPClusterGroupUpdate(t *testing.T) { updatedReachability.Expect(getPod("y", "a"), getPod("y", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80 - update", - updatedReachability, - []metav1.Object{updatedCgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedCgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1051,13 +1025,11 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { } testStep := []*TestStep{ { - "Port 80", - nil, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80", + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, }, } testCase := []*TestCase{ @@ -1099,14 +1071,12 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { } testStep := []*TestStep{ { - "Port 80", - nil, + Name: "Port 80", // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, }, } testCase := []*TestCase{ @@ -1169,13 +1139,11 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { reachability.Expect(getPod("z", "a"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder.Get(), 
cgBuilder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1200,14 +1168,12 @@ func testANNPEgressRulePodsAToGrpWithPodsC(t *testing.T) { reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1235,13 +1201,11 @@ func testANNPIngressRuleDenyGrpWithXCtoXA(t *testing.T) { testStep := []*TestStep{ { - "NamedPort 81", - reachability, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "NamedPort 81", + Reachability: reachability, + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1271,22 +1235,18 @@ func testANNPGroupUpdate(t *testing.T) { updatedReachability.Expect(getPod("x", "a"), getPod("x", "b"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80 - update", - updatedReachability, - []metav1.Object{updatedGrpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedGrpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1314,14 +1274,12 @@ func testANNPAppliedToDenyXBtoGrpWithXA(t *testing.T) { testStep := []*TestStep{ { - "NamedPort 81", - reachability, + Name: "NamedPort 81", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1345,14 +1303,12 @@ func testANNPAppliedToRuleGrpWithPodsAToPodsC(t *testing.T) { reachability.Expect(getPod("x", "a"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1382,22 +1338,18 @@ func testANNPGroupUpdateAppliedTo(t *testing.T) { updatedReachability.Expect(getPod("x", "b"), getPod("x", "c"), Dropped) testStep := []*TestStep{ { - "GRP Pods X/C", - reachability, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "GRP Pods X/C", + Reachability: reachability, + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: 
ProtocolTCP, }, { - "GRP Pods X/B - update", - updatedReachability, - []metav1.Object{updatedGrpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "GRP Pods X/B - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedGrpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1432,13 +1384,11 @@ func testANNPGroupAppliedToPodAdd(t *testing.T, data *TestData) { } testStep := []*TestStep{ { - "Port 80", - nil, - []metav1.Object{grpBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80", + TestResources: []metav1.Object{grpBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, }, } testCase := []*TestCase{ @@ -1482,13 +1432,12 @@ func testANNPGroupServiceRefPodAdd(t *testing.T, data *TestData) { reachability := NewReachability(allPods, Connected) reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) testStep := &TestStep{ - "Port 80 updated", - reachability, - []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability, + TestResources: []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } testSteps := []*TestStep{testStep} @@ -1568,13 +1517,11 @@ func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) { reachability := NewReachability(allPods, Connected) reachability.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, - []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{svc1, svc2, grpBuilder1.Get(), grpBuilder2.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update selector of Service referred in grp-svc1, and update serviceReference of grp-svc2. 
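Aside on the refactor running through this file: switching the TestStep literals from positional to keyed fields lets each step omit zero-valued members such as Duration and CustomProbes, and means later additions to the struct, like the CustomSetup/CustomTeardown hooks introduced further down in this series, do not break every existing literal. A minimal standalone Go sketch of the difference, using a trimmed, hypothetical stand-in for the real TestStep in test/e2e/k8s_util.go:

package main

import "fmt"

// Trimmed, hypothetical stand-in for the e2e TestStep struct; the real one
// also carries Reachability, TestResources, CustomProbes and, after this
// series, CustomSetup/CustomTeardown.
type TestStep struct {
	Name     string
	Ports    []int32
	Protocol string
	Duration int // zero in every step refactored in this file
}

func main() {
	// Positional literal: every field must be spelled out in declaration
	// order, so adding a struct field breaks all existing call sites.
	s1 := TestStep{"Port 80", []int32{80}, "TCP", 0}
	// Keyed literal: omitted fields default to their zero values, and new
	// fields can be appended to the struct without touching old literals.
	s2 := TestStep{Name: "Port 80", Ports: []int32{80}, Protocol: "TCP"}
	fmt.Println(s1.Name == s2.Name, s1.Duration == s2.Duration) // true true
}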
@@ -1586,13 +1533,11 @@ func testANNPGroupServiceRefCreateAndUpdate(t *testing.T) { reachability2 := NewReachability(allPods, Connected) reachability2.Expect(getPod("x", "c"), getPod("x", "b"), Dropped) testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{svc1Updated, svc3, grpBuilder1.Get(), grpBuilder2Updated.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{svc1Updated, svc3, grpBuilder1.Get(), grpBuilder2Updated.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2} @@ -1638,13 +1583,11 @@ func testANNPGroupRefRuleIPBlocks(t *testing.T) { reachability.Expect(getPod("x", "c"), getPod("x", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), grpBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), grpBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1681,14 +1624,12 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) { reachability.ExpectSelf(allPods, Connected) testStep1 := &TestStep{ - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the Group is created after the ANNP - []metav1.Object{builder.Get(), svc1, grpBuilder1.Get(), grpBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), svc1, grpBuilder1.Get(), grpBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update "grp-nested" to include "grp-select-x-b" as well. @@ -1714,13 +1655,12 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) { }, } testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{grpBuilder2.Get(), grpBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{grpBuilder2.Get(), grpBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } // In this testStep grp3 is created. 
It's members should reflect in grp-nested @@ -1731,13 +1671,11 @@ func testANNPNestedGroupCreateAndUpdate(t *testing.T, data *TestData) { reachability3.ExpectEgressToNamespace(getPod("x", "c"), getNS("x"), Dropped) reachability3.ExpectSelf(allPods, Connected) testStep3 := &TestStep{ - "Port 80 updated", - reachability3, - []metav1.Object{grpBuilder3.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability3, + TestResources: []metav1.Object{grpBuilder3.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2, testStep3} @@ -1786,13 +1724,11 @@ func testBaselineNamespaceIsolation(t *testing.T) { reachability.ExpectIngressFromNamespace(getPod("x", "c"), getNS("z"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1850,25 +1786,21 @@ func testACNPPriorityOverride(t *testing.T) { testStepTwoACNP := []*TestStep{ { - "Two Policies with different priorities", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies with different priorities", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } // Create the Policies in specific order to make sure that priority re-assignments work as expected. testStepAll := []*TestStep{ { - "All three Policies", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1927,24 +1859,20 @@ func testACNPTierOverride(t *testing.T) { testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testStepAll := []*TestStep{ { - "All three Policies in different tiers", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies in different tiers", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -1992,13 +1920,11 @@ func testACNPCustomTiers(t *testing.T) { reachabilityTwoACNPs.Expect(getPod("z", "c"), getPod("x", "c"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder2.Get(), builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder2.Get(), builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := 
[]*TestCase{ @@ -2036,13 +1962,11 @@ func testACNPPriorityConflictingRule(t *testing.T) { reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "c"), getNS("x"), Dropped) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2083,13 +2007,11 @@ func testACNPRulePriority(t *testing.T) { reachabilityBothACNP.ExpectIngressFromNamespace(getPod("y", "c"), getNS("x"), Dropped) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder2.Get(), builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder2.Get(), builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2114,13 +2036,11 @@ func testACNPPortRange(t *testing.T) { reachability.Expect(getPod("z", "a"), getPod("z", "c"), Dropped) testSteps := []*TestStep{ { - fmt.Sprintf("ACNP Drop Ports 8080:8082"), - reachability, - []metav1.Object{builder.Get()}, - []int32{8080, 8081, 8082}, - ProtocolTCP, - 0, - nil, + Name: fmt.Sprintf("ACNP Drop Ports 8080:8082"), + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{8080, 8081, 8082}, + Protocol: ProtocolTCP, }, } @@ -2146,13 +2066,11 @@ func testACNPRejectEgress(t *testing.T) { reachability.Expect(getPod("z", "a"), getPod("z", "c"), Rejected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2177,13 +2095,11 @@ func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { reachability.Expect(getPod("z", "c"), getPod("z", "a"), Rejected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -2400,13 +2316,11 @@ func testANNPPortRange(t *testing.T) { var testSteps []*TestStep testSteps = append(testSteps, &TestStep{ - fmt.Sprintf("ANNP Drop Ports 8080:8082"), - reachability, - []metav1.Object{builder.Get()}, - []int32{8080, 8081, 8082}, - ProtocolTCP, - 0, - nil, + Name: fmt.Sprintf("ANNP Drop Ports 8080:8082"), + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{8080, 8081, 8082}, + Protocol: ProtocolTCP, }) testCase := []*TestCase{ @@ -2429,13 +2343,11 @@ func testANNPBasic(t *testing.T) { reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } // build a K8s NetworkPolicy that has the same appliedTo but allows all traffic. 
@@ -2446,13 +2358,11 @@ func testANNPBasic(t *testing.T) { nil, nil, nil, nil) testStep2 := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -2845,13 +2755,11 @@ func testAppliedToPerRule(t *testing.T) { reachability.Expect(getPod("z", "b"), getPod("y", "b"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } @@ -2873,16 +2781,13 @@ func testAppliedToPerRule(t *testing.T) { reachability2.Expect(getPod("z", "b"), getPod("y", "b"), Dropped) testStep2 := []*TestStep{ { - "Port 80", - reachability2, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } - testCase := []*TestCase{ {"ANNP AppliedTo per rule", testStep}, {"ACNP AppliedTo per rule", testStep2}, @@ -2909,13 +2814,11 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) reachability := NewReachability(allPods, Connected) reachability.Expect(getPod("y", "b"), getPod("x", "a"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, - []metav1.Object{svc1, svc2, cgBuilder1.Get(), cgBuilder2.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{svc1, svc2, cgBuilder1.Get(), cgBuilder2.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update selector of Service referred in cg-svc1, and update serviceReference of cg-svc2. @@ -2943,13 +2846,12 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) reachability2 := NewReachability(allPods, Connected) reachability2.Expect(getPod("y", "a"), getPod("x", "b"), Dropped) testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{svc1Updated, svc3, cgBuilder1.Get(), cgBuilder2Updated.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{svc1Updated, svc3, cgBuilder1.Get(), cgBuilder2Updated.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } builderUpdated := &ClusterNetworkPolicySpecBuilder{} @@ -2960,13 +2862,11 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. 
testStep3 := &TestStep{ - "Port 80 ACNP spec updated to selector", - reachability, - []metav1.Object{builderUpdated.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 ACNP spec updated to selector", + Reachability: reachability, + TestResources: []metav1.Object{builderUpdated.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2, testStep3} @@ -3007,14 +2907,12 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), svc1, cgBuilder1.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), svc1, cgBuilder1.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Test update "cg-nested" to include "cg-select-y-b" as well. @@ -3039,13 +2937,12 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { }, } testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - cp, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomProbes: cp, } // In this testStep cg3 is created. It's members should reflect in cg-nested @@ -3055,13 +2952,11 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { reachability3.ExpectEgressToNamespace(getPod("y", "b"), getNS("z"), Dropped) reachability3.ExpectEgressToNamespace(getPod("y", "c"), getNS("z"), Dropped) testStep3 := &TestStep{ - "Port 80 updated", - reachability3, - []metav1.Object{cgBuilder3.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability3, + TestResources: []metav1.Object{cgBuilder3.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2, testStep3} @@ -3114,13 +3009,11 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) reachability.Expect(getPod("x", "b"), getPod("y", "a"), Dropped) testStep := &TestStep{ - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } cgBuilder3 := &ClusterGroupSpecBuilder{} @@ -3134,13 +3027,11 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { reachability2.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) reachability2.Expect(getPod("x", "c"), getPod("y", "a"), Dropped) testStep2 := &TestStep{ - "Port 80, updated", - reachability2, - []metav1.Object{cgBuilder3.Get(), updatedCGParent.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80, updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgBuilder3.Get(), updatedCGParent.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -3164,13 +3055,11 @@ func testACNPNamespaceIsolation(t 
*testing.T) { reachability := NewReachability(allPods, Dropped) reachability.ExpectAllSelfNamespace(Connected) testStep1 := &TestStep{ - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -3190,13 +3079,11 @@ func testACNPNamespaceIsolation(t *testing.T) { reachability2.ExpectEgressToNamespace(getPod("x", "c"), getNS("y"), Dropped) reachability2.ExpectEgressToNamespace(getPod("x", "c"), getNS("z"), Dropped) testStep2 := &TestStep{ - "Port 80", - reachability2, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -3221,13 +3108,11 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { reachability := NewReachability(allPods, Dropped) reachability.ExpectAllSelfNamespace(Connected) testStep1 := &TestStep{ - "Namespace isolation, Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Namespace isolation, Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } // Add a K8s namespaced NetworkPolicy in ns x that isolates all Pods in that namespace. @@ -3239,13 +3124,11 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { reachability2.ExpectSelfNamespace(getNS("x"), Dropped) reachability2.ExpectSelf(allPods, Connected) testStep2 := &TestStep{ - "Namespace isolation with K8s NP, Port 80", - reachability2, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Namespace isolation with K8s NP, Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -3285,27 +3168,25 @@ func testACNPStrictNamespacesIsolationByLabels(t *testing.T) { reachability.ExpectSelfNamespace(getNS("no-tier"), Dropped) reachability.ExpectSelf(allPods, Connected) - testStep1 := &TestStep{ - "Namespace isolation by label, Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + testStep := &TestStep{ + Name: "Namespace isolation by label, Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ - {"ACNP strict Namespace isolation by Namespace purpose and tier labels", []*TestStep{testStep1}}, + {"ACNP strict Namespace isolation by Namespace purpose and tier labels", []*TestStep{testStep}}, } executeTests(t, testCase) } -func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T) { +func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T, data *TestData) { samePurposeTierLabels := &crdv1beta1.PeerNamespaces{ SameLabels: []string{"purpose"}, } builder := &ClusterNetworkPolicySpecBuilder{} - builder = builder.SetName("test-acnp-strict-ns-isolation-by-single-label"). + builder = builder.SetName("test-acnp-strict-ns-isolation-by-single-purpose-label"). SetTier("securityops"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) @@ -3325,19 +3206,52 @@ func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T) { reachability.ExpectNamespaceIngressFromNamespace(getNS("dev1"), getNS("no-tier"), Dropped) reachability.ExpectNamespaceIngressFromNamespace(getNS("dev2"), getNS("no-tier"), Dropped) - testStep1 := &TestStep{ - "Namespace isolation by single label, Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + testStep := &TestStep{ + Name: "Namespace isolation by single label, Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, + } + + labelNoTierNS := func() { + nsReturned, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), getNS("no-tier"), metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get the Namespace that has no tier label") + } + nsReturned.Labels = map[string]string{ + "purpose": "test", + } + log.Infof("Updating no-tier Namespace purpose label") + if _, err = data.clientset.CoreV1().Namespaces().Update(context.TODO(), nsReturned, metav1.UpdateOptions{}); err != nil { + t.Errorf("failed to update the no-tier Namespace with purpose=test label") + } + } + revertLabel := func() { + nsReturned, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), getNS("no-tier"), metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get the no-tier Namespace") + } + nsReturned.Labels = map[string]string{ + "purpose": "test-exclusion", + } + if _, err = data.clientset.CoreV1().Namespaces().Update(context.TODO(), nsReturned, metav1.UpdateOptions{}); err != nil { + t.Errorf("failed to revert the purpose label for the no-tier Namespace") + } + } + newReachability := NewReachability(allPods, Connected) + testStep2 := &TestStep{ + Name: "Namespace isolation after Namespace label update, Port 80", + Reachability: newReachability, + Ports: []int32{80}, + Protocol: ProtocolTCP, + CustomSetup: labelNoTierNS, + CustomTeardown: revertLabel, } testCase := []*TestCase{ - {"ACNP strict Namespace isolation by Namespace purpose label", []*TestStep{testStep1}}, + {"ACNP strict Namespace isolation by Namespace purpose label", []*TestStep{testStep, testStep2}}, } - executeTests(t, testCase) + executeTestsWithData(t, testCase, data) } func testFQDNPolicy(t *testing.T) { @@ -4250,7 +4164,9 @@ func executeTestsWithData(t *testing.T, testList []*TestCase, data *TestData) { for _, step := range testCase.Steps { log.Infof("running step %s of test case %s", step.Name, testCase.Name) applyTestStepResources(t, step) - + if step.CustomSetup != nil { + step.CustomSetup() + } reachability := step.Reachability if reachability != nil { start := time.Now() @@ -4270,6 +4186,9 @@ func executeTestsWithData(t *testing.T, testList []*TestCase, data *TestData) { for _, p := range step.CustomProbes { doProbe(t, data, p, step.Protocol) } + if step.CustomTeardown != nil { + step.CustomTeardown() + } } log.Debug("Cleaning-up all policies and groups created by this Testcase") cleanupTestCaseResources(t, testCase) @@ -4599,7 +4518,7 @@ func TestAntreaPolicyExtendedNamespaces(t *testing.T) { t.Run("TestGroupACNPNamespaceLabelSelections", func(t *testing.T) { t.Run("Case=ACNPStrictNamespacesIsolationByLabels", func(t *testing.T) { testACNPStrictNamespacesIsolationByLabels(t) }) -
t.Run("Case=ACNPStrictNamespacesIsolationBySingleLabel", func(t *testing.T) { testACNPStrictNamespacesIsolationBySingleLabel(t, data) }) }) k8sUtils.Cleanup(namespaces) } diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index a300ed6d2aa..71038752779 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -62,13 +62,15 @@ type TestCase struct { // TestStep is a single unit of testing spec. It includes the policy specs that need to be // applied for this test, the port to test traffic on and the expected Reachability matrix. type TestStep struct { - Name string - Reachability *Reachability - TestResources []metav1.Object - Ports []int32 - Protocol utils.AntreaPolicyProtocol - Duration time.Duration - CustomProbes []*CustomProbe + Name string + Reachability *Reachability + TestResources []metav1.Object + Ports []int32 + Protocol utils.AntreaPolicyProtocol + Duration time.Duration + CustomProbes []*CustomProbe + CustomSetup func() + CustomTeardown func() } // CustomProbe will spin up (or update) SourcePod and DestPod such that Add event of Pods From de625a83b1bb36e3b9d3427b50292a829675e8d6 Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Thu, 26 Jan 2023 21:34:06 -0800 Subject: [PATCH 5/7] Improve namespace label filtering Signed-off-by: Dyanngg --- .../networkpolicy/networkpolicy_controller.go | 7 +-- test/e2e/antreaipam_anp_test.go | 46 ++++++++----------- 2 files changed, 22 insertions(+), 31 deletions(-) diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go index fc697554ce8..f083cf54fb6 100644 --- a/pkg/controller/networkpolicy/networkpolicy_controller.go +++ b/pkg/controller/networkpolicy/networkpolicy_controller.go @@ -94,7 +94,7 @@ const ( internalGroupType grouping.GroupType = "internalGroup" perNamespaceRuleIndex = "hasPerNamespaceRule" - namespaceLabelRuleIndex = "hasNamespaceLabelRule" + namespaceLabelRuleIndex = "namespaceRuleLabelKeys" hasSuchRule = "true" ) @@ -344,10 +344,7 @@ var acnpIndexers = cache.Indexers{ if !ok { return []string{}, nil } - if hasNSLabelRule := hasNamespaceLabelRule(cnp); hasNSLabelRule { - return []string{hasSuchRule}, nil - } - return []string{}, nil + return namespaceRuleLabelKeys(cnp).UnsortedList(), nil }, } diff --git a/test/e2e/antreaipam_anp_test.go b/test/e2e/antreaipam_anp_test.go index 11d6cd14866..7ee0504f413 100644 --- a/test/e2e/antreaipam_anp_test.go +++ b/test/e2e/antreaipam_anp_test.go @@ -35,25 +35,25 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { p8082 = 8082 p8085 = 8085 pods = []string{"a", "b", "c"} - namespaces = make(map[string]string) - regularNamespaces := make(map[string]string) + namespaces = make(map[string]TestNamespaceMeta) + regularNamespaces := make(map[string]TestNamespaceMeta) suffix := randName("") - namespaces["x"] = "antrea-x-" + suffix + namespaces["x"] = TestNamespaceMeta{ + Name: "antrea-x-" + suffix, + } regularNamespaces["x"] = namespaces["x"] // This function "initializeAntreaIPAM" will be used more than once, and variable "allPods" is global. // It should be empty every time when "initializeAntreaIPAM" is performed, otherwise there will be unexpected // results. 
allPods = []Pod{} podsByNamespace = make(map[string][]Pod) - for _, ns := range antreaIPAMNamespaces { - namespaces[ns] = ns + namespaces[ns] = TestNamespaceMeta{Name: ns} } - for _, podName := range pods { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) + podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], NewPod(ns.Name, podName)) } } @@ -195,35 +195,29 @@ func testAntreaIPAMACNP(t *testing.T, protocol e2eutils.AntreaPolicyProtocol, ac SetAppliedToGroup([]e2eutils.ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "c"}}}) if isIngress { builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder2.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder3.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) } else { builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder2.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) builder3.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, nil, nil, ruleAction, "", "", nil) } reachability := NewReachability(allPods, action) - for _, ns := range namespaces { - for _, pod := range []string{"/a", "/b", "/c"} { - reachability.Expect(Pod(ns+pod), Pod(ns+pod), Connected) - } - } + reachability.ExpectSelf(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), builder2.Get(), builder3.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), builder2.Get(), builder3.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ From 83e98657980040f20b796bf368e29d30cefb32f4 Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Fri, 7 Jul 2023 15:21:54 -0700 Subject: [PATCH 6/7] Address comments Signed-off-by: Dyanngg --- .../antrea/crds/clusternetworkpolicy.yaml | 24 +- build/yamls/antrea-aks.yml | 24 +- build/yamls/antrea-crds.yml | 24 +- build/yamls/antrea-eks.yml | 24 +- build/yamls/antrea-gke.yml | 24 +- build/yamls/antrea-ipsec.yml | 24 +- build/yamls/antrea.yml | 24 +- .../yamls/antrea-multicluster-leader.yml | 96 +++- .../networkpolicy/clusternetworkpolicy.go | 128 ++++-- .../clusternetworkpolicy_test.go | 110 ++++- .../networkpolicy/networkpolicy_controller.go | 14 +- pkg/controller/networkpolicy/validate.go | 11 +- pkg/controller/networkpolicy/validate_test.go | 59 +++ test/e2e/antreapolicy_test.go | 126 ++--- test/e2e/nodenetworkpolicy_test.go | 434 ++++++++---------- 15 files changed, 700 insertions(+), 446 deletions(-) diff --git 
a/build/charts/antrea/crds/clusternetworkpolicy.yaml b/build/charts/antrea/crds/clusternetworkpolicy.yaml index 6027b25f055..a6e2cca2530 100644 --- a/build/charts/antrea/crds/clusternetworkpolicy.yaml +++ b/build/charts/antrea/crds/clusternetworkpolicy.yaml @@ -344,10 +344,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -609,10 +605,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1084,11 +1076,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1360,11 +1358,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 1cd6039781e..f4bc9bb2162 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -910,10 +910,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1175,10 +1171,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1650,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1926,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-crds.yml b/build/yamls/antrea-crds.yml index d0289ba5f1b..e0497dcf8b2 100644 --- a/build/yamls/antrea-crds.yml +++ b/build/yamls/antrea-crds.yml @@ -903,10 +903,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1168,10 +1164,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1643,11 +1635,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1919,11 +1917,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index b96ce87679b..bec701d3056 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -910,10 +910,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1175,10 +1171,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1650,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + 
type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1926,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index 8af4f5b6291..a4ae810f7b0 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -910,10 +910,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1175,10 +1171,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1650,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1926,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index a3e4884dcb9..393cb59da0a 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -910,10 +910,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1175,10 +1171,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1650,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1926,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index ba6a4c22c51..2451670ca39 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -910,10 +910,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1175,10 +1171,6 @@ spec: enum: - Self type: string - sameLabels: - type: array - items: - type: string ipBlock: type: object properties: @@ -1650,11 +1642,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: @@ -1926,11 +1924,17 @@ spec: type: object namespaces: type: object + maxProperties: 1 + minProperties: 1 properties: match: + type: string enum: - Self - type: string + sameLabels: + type: array + items: + type: string ipBlock: type: object properties: diff --git a/multicluster/build/yamls/antrea-multicluster-leader.yml b/multicluster/build/yamls/antrea-multicluster-leader.yml index 8c8a4c10ad8..38f2c43342c 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader.yml @@ 
-1143,9 +1143,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -1550,9 +1558,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2107,9 +2123,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -2514,9 +2538,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4054,9 +4086,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -4461,9 +4501,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5018,9 +5066,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' 
properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the @@ -5425,9 +5481,17 @@ spec: ingress/egress rules. Cannot be set with NamespaceSelector.' properties: match: - description: NamespaceMatchType describes Namespace - matching strategy. + description: Selects from the same Namespace of + the appliedTo workloads. type: string + sameLabels: + description: Selects Namespaces that share the + same values for the given set of label keys + with the appliedTo Namespace. Namespaces must + have all the label keys. + items: + type: string + type: array type: object nodeSelector: description: Select certain Nodes which match the diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go index 6e5e51cadc1..23e3405879f 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go @@ -35,8 +35,7 @@ import ( ) const ( - labelValueUndefined = "Undefined" - labelValueSeparater = "," + labelValueSeparator = "," ) func getACNPReference(cnp *crdv1beta1.ClusterNetworkPolicy) *controlplane.NetworkPolicyReference { @@ -125,16 +124,13 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels } affectedPolicies := sets.New[string]() - objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(perNamespaceRuleIndex, hasSuchRule) + objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(perNamespaceRuleIndex, indexValueTrue) for _, obj := range objs { cnp := obj.(*crdv1beta1.ClusterNetworkPolicy) if affected := func() bool { if len(cnp.Spec.AppliedTo) > 0 { // The policy has only spec level AppliedTo. - if namespaceLabelMatches(cnp.Spec.AppliedTo) { - return true - } - return false + return namespaceLabelMatches(cnp.Spec.AppliedTo) } // The policy has rule level AppliedTo. // It needs to check each rule's peers. If any peer of the rule has PeerNamespaces selector and its @@ -157,6 +153,36 @@ func (n *NetworkPolicyController) filterPerNamespaceRuleACNPsByNSLabels(nsLabels return affectedPolicies } +// getACNPsWithRulesMatchingAnyLabelKey gets all ACNPs that have relevant rules based on Namespace label keys. +func (n *NetworkPolicyController) getACNPsWithRulesMatchingAnyLabelKey(labelKeys sets.Set[string]) sets.Set[string] { + matchedPolicyNames := sets.New[string]() + for k := range labelKeys { + objs, _ := n.acnpInformer.Informer().GetIndexer().ByIndex(namespaceRuleLabelKeyIndex, k) + for _, obj := range objs { + cnp := obj.(*crdv1beta1.ClusterNetworkPolicy) + matchedPolicyNames.Insert(cnp.Name) + } + } + return matchedPolicyNames +} + +// getACNPsWithRulesMatchingAnyUpdatedLabels gets all ACNPs that have rules based on Namespace +// label keys, which have changes in value across Namespace update. 
+func (n *NetworkPolicyController) getACNPsWithRulesMatchingAnyUpdatedLabels(oldNSLabels, newNSLabels map[string]string) sets.Set[string] { + updatedLabelKeys := sets.New[string]() + for k, v := range oldNSLabels { + if v2, ok := newNSLabels[k]; !ok || v2 != v { + updatedLabelKeys.Insert(k) + } + } + for k, v2 := range newNSLabels { + if v, ok := oldNSLabels[k]; !ok || v != v2 { + updatedLabelKeys.Insert(k) + } + } + return n.getACNPsWithRulesMatchingAnyLabelKey(updatedLabelKeys) +} + // addNamespace receives Namespace ADD events and triggers all ClusterNetworkPolicies that have a // per-namespace rule applied to this Namespace to be re-processed. func (n *NetworkPolicyController) addNamespace(obj interface{}) { @@ -185,6 +211,10 @@ func (n *NetworkPolicyController) updateNamespace(oldObj, curObj interface{}) { affectedACNPsByOldLabels := n.filterPerNamespaceRuleACNPsByNSLabels(oldNamespace.Labels) affectedACNPsByCurLabels := n.filterPerNamespaceRuleACNPsByNSLabels(curNamespace.Labels) affectedACNPs := utilsets.SymmetricDifferenceString(affectedACNPsByOldLabels, affectedACNPsByCurLabels) + // Any ACNP that has Namespace label rules referring to a label key whose value changed + // during the Namespace update will need to be re-processed. + acnpsWithRulesMatchingNSLabelKeys := n.getACNPsWithRulesMatchingAnyUpdatedLabels(oldNamespace.Labels, curNamespace.Labels) + affectedACNPs = affectedACNPs.Union(acnpsWithRulesMatchingNSLabelKeys) for cnpName := range affectedACNPs { // Ignore the ClusterNetworkPolicy if it has been removed during the process. if cnp, err := n.acnpLister.Get(cnpName); err == nil { @@ -345,7 +375,7 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl atgPerAffectedNS := map[string]*antreatypes.AppliedToGroup{} // When appliedTo is set at spec level and the ACNP has rules that select peer Namespaces by sameLabels, // this field tracks the labels of all Namespaces selected by the appliedTo. - affectedNSAndLabels := map[string]map[string]string{} + labelsPerAffectedNS := map[string]labels.Set{} // clusterSetScopeSelectorKeys keeps track of all the ClusterSet-scoped selector keys of the policy. // During policy peer processing, any ClusterSet-scoped selector will be registered with the // labelIdentityInterface and added to this set.
By the end of the function, this set will @@ -357,10 +387,10 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil, nil) appliedToGroups = mergeAppliedToGroups(appliedToGroups, atg) atgPerAffectedNS[at.ServiceAccount.Namespace] = atg - affectedNSAndLabels[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) + labelsPerAffectedNS[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) } else { - affectedNSAndLabels = n.getAffectedNamespacesForAppliedTo(at) - for ns := range affectedNSAndLabels { + labelsPerAffectedNS = n.getAffectedNamespacesForAppliedTo(at) + for ns := range labelsPerAffectedNS { atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) appliedToGroups = mergeAppliedToGroups(appliedToGroups, atg) atgPerAffectedNS[ns] = atg @@ -448,9 +478,9 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } if len(nsLabelPeers) > 0 { if len(cnp.Spec.AppliedTo) > 0 { - // All affected Namespaces and their labels are already stored in affectedNSAndLabels + // All affected Namespaces and their labels are already stored in labelsPerAffectedNS for _, peer := range nsLabelPeers { - nsGroupByLabelVal := groupNamespacesByLabelValue(affectedNSAndLabels, peer.Namespaces.SameLabels) + nsGroupByLabelVal := groupNamespacesByLabelValue(labelsPerAffectedNS, peer.Namespaces.SameLabels) for labelValues, groupedNamespaces := range nsGroupByLabelVal { peer, atgs, ags, selKeys := n.toAntreaPeerForSameLabelNamespaces(peer, cnp, atgPerAffectedNS, labelValues, groupedNamespaces) clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) @@ -459,22 +489,22 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } } else { atgPerRuleAffectedNS := map[string]*antreatypes.AppliedToGroup{} - ruleAffectedNSLabels := map[string]map[string]string{} + labelsPerRuleAffectedNS := map[string]labels.Set{} for _, at := range cnpRule.AppliedTo { if at.ServiceAccount != nil { atg := n.createAppliedToGroup(at.ServiceAccount.Namespace, serviceAccountNameToPodSelector(at.ServiceAccount.Name), nil, nil, nil) atgPerRuleAffectedNS[at.ServiceAccount.Namespace] = atg - ruleAffectedNSLabels[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) + labelsPerRuleAffectedNS[at.ServiceAccount.Namespace] = n.getNamespaceLabels(at.ServiceAccount.Namespace) } else { - ruleAffectedNSLabels = n.getAffectedNamespacesForAppliedTo(at) - for ns := range ruleAffectedNSLabels { + labelsPerRuleAffectedNS = n.getAffectedNamespacesForAppliedTo(at) + for ns := range labelsPerRuleAffectedNS { atg := n.createAppliedToGroup(ns, at.PodSelector, nil, at.ExternalEntitySelector, nil) atgPerRuleAffectedNS[ns] = atg } } } for _, peer := range nsLabelPeers { - nsGroupByLabelVal := groupNamespacesByLabelValue(ruleAffectedNSLabels, peer.Namespaces.SameLabels) + nsGroupByLabelVal := groupNamespacesByLabelValue(labelsPerRuleAffectedNS, peer.Namespaces.SameLabels) for labelValues, groupedNamespaces := range nsGroupByLabelVal { peer, atgs, ags, selKeys := n.toAntreaPeerForSameLabelNamespaces(peer, cnp, atgPerRuleAffectedNS, labelValues, groupedNamespaces) clusterSetScopeSelectorKeys = clusterSetScopeSelectorKeys.Union(selKeys) @@ -542,14 +572,42 @@ func hasPerNamespaceRule(cnp *crdv1beta1.ClusterNetworkPolicy) bool { return false } -func 
(n *NetworkPolicyController) getNamespaceLabels(ns string) map[string]string {
-	namespace, _ := n.namespaceLister.Get(ns)
+func namespaceRuleLabelKeys(cnp *crdv1beta1.ClusterNetworkPolicy) sets.Set[string] {
+	keys := sets.New[string]()
+	for _, ingress := range cnp.Spec.Ingress {
+		for _, peer := range ingress.From {
+			if peer.Namespaces != nil {
+				for _, k := range peer.Namespaces.SameLabels {
+					keys.Insert(k)
+				}
+			}
+		}
+	}
+	for _, egress := range cnp.Spec.Egress {
+		for _, peer := range egress.To {
+			if peer.Namespaces != nil {
+				for _, k := range peer.Namespaces.SameLabels {
+					keys.Insert(k)
+				}
+			}
+		}
+	}
+	return keys
+}
+
+func (n *NetworkPolicyController) getNamespaceLabels(ns string) labels.Set {
+	namespace, err := n.namespaceLister.Get(ns)
+	if err != nil {
+		// The Namespace referred to (by ServiceAccount etc.) does not exist yet.
+		// The ACNP will be re-queued once the event for that Namespace is received.
+		return labels.Set{}
+	}
 	return namespace.Labels
 }
 
 // groupNamespacesByLabelValue groups Namespaces if they have the same label value for all the
-// label keys listed. If a Namespace is missing at least one of the label keys, it will be
-// not be grouped. Example:
+// label keys listed. If a Namespace is missing at least one of the label keys, it will not
+// be grouped. Example:
 //
 //	ns1: app=web, tier=test, tenant=t1
 //	ns2: app=web, tier=test, tenant=t2
@@ -560,10 +618,10 @@ func (n *NetworkPolicyController) getNamespaceLabels(ns string) map[string]strin
 //	Result after grouping:
 //	"web,test,": [ns1, ns2]
 //	"web,production,": [ns3, ns4]
-func groupNamespacesByLabelValue(affectedNSAndLabels map[string]map[string]string, labelKeys []string) map[string][]string {
+func groupNamespacesByLabelValue(affectedNSAndLabels map[string]labels.Set, labelKeys []string) map[string][]string {
 	nsGroupedByLabelVal := map[string][]string{}
 	for ns, nsLabels := range affectedNSAndLabels {
-		if groupKey := getLabelValues(nsLabels, labelKeys); groupKey != labelValueUndefined {
+		if groupKey := getLabelValues(nsLabels, labelKeys); groupKey != "" {
 			nsGroupedByLabelVal[groupKey] = append(nsGroupedByLabelVal[groupKey], ns)
 		}
 	}
@@ -573,19 +631,17 @@ func (n *NetworkPolicyController) getNamespaceLabels(ns string) map[string]strin
 func getLabelValues(labels map[string]string, labelKeys []string) string {
 	key := ""
 	for _, k := range labelKeys {
-		if v, ok := labels[k]; !ok {
-			return labelValueUndefined
-		} else {
-			key += v + labelValueSeparater
+		if v, ok := labels[k]; !ok {
+			// A Namespace missing any of the label keys is not grouped.
+			return ""
+		} else {
+			key += v + labelValueSeparator
 		}
 	}
 	return key
 }
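For readers following the grouping logic above, here is a small standalone sketch (not part of the patch, stdlib only, assuming "," as labelValueSeparator) that reproduces the example from the comment; a Namespace missing one of the keys, like ns5 below, is never grouped:

package main

import "fmt"

const sep = ","

// labelValuesKey mirrors getLabelValues above: it concatenates a Namespace's
// values for the listed keys, or returns "" if any key is missing.
func labelValuesKey(nsLabels map[string]string, keys []string) string {
	key := ""
	for _, k := range keys {
		v, ok := nsLabels[k]
		if !ok {
			return ""
		}
		key += v + sep
	}
	return key
}

func main() {
	nsLabels := map[string]map[string]string{
		"ns1": {"app": "web", "tier": "test", "tenant": "t1"},
		"ns2": {"app": "web", "tier": "test", "tenant": "t2"},
		"ns3": {"app": "web", "tier": "production", "tenant": "t1"},
		"ns4": {"app": "web", "tier": "production", "tenant": "t2"},
		"ns5": {"app": "web"}, // missing "tier", so never grouped
	}
	groups := map[string][]string{}
	for ns, l := range nsLabels {
		if k := labelValuesKey(l, []string{"app", "tier"}); k != "" {
			groups[k] = append(groups[k], ns)
		}
	}
	// Expected group membership (slice order varies with map iteration):
	// "web,test,"       -> ns1, ns2
	// "web,production," -> ns3, ns4
	fmt.Println(groups)
}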
 
-// labelKeyValPairsToSelector creates a LabelSelector based on a list of label keys
+// convertSameLabelsToSelector creates a LabelSelector based on a list of label keys
 // and their expected values.
-func labelKeyValPairsToSelector(labelKeys []string, labelValues string) *metav1.LabelSelector {
-	labelValuesSep := strings.Split(labelValues, labelValueSeparater)
+func convertSameLabelsToSelector(labelKeys []string, labelValues string) *metav1.LabelSelector {
+	labelValuesSep := strings.Split(labelValues, labelValueSeparator)
 	labelMatchCriteria := map[string]string{}
 	for i := range labelKeys {
 		labelMatchCriteria[labelKeys[i]] = labelValuesSep[i]
 	}
@@ -603,10 +659,10 @@ func (n *NetworkPolicyController) toAntreaPeerForSameLabelNamespaces(peer crdv1b
 	namespacesByLabelValues []string) (*controlplane.NetworkPolicyPeer, []*antreatypes.AppliedToGroup, []*antreatypes.AddressGroup, sets.Set[string]) {
 	labelKeys := peer.Namespaces.SameLabels
 	var labelIdentities []uint32
-	uniqueLabelIDs := map[uint32]struct{}{}
+	uniqueLabelIDs := sets.New[uint32]()
 	clusterSetScopeSelectorKeys := sets.New[string]()
 	// Select Namespaces that, for the given label keys, have the same values as the appliedTo Namespaces.
-	nsSelForSameLabels := labelKeyValPairsToSelector(labelKeys, labelValues)
+	nsSelForSameLabels := convertSameLabelsToSelector(labelKeys, labelValues)
 	addressGroups := []*antreatypes.AddressGroup{n.createAddressGroup("", peer.PodSelector, nsSelForSameLabels, peer.ExternalEntitySelector, nil)}
 	if n.stretchNPEnabled && peer.Scope == crdv1beta1.ScopeClusterSet {
 		newClusterSetScopeSelector := antreatypes.NewGroupSelector("", peer.PodSelector, nsSelForSameLabels, peer.ExternalEntitySelector, nil)
@@ -615,7 +671,7 @@ func (n *NetworkPolicyController) toAntreaPeerForSameLabelNamespaces(peer crdv1b
 		// with the labelIdentityInterface.
 		matchedLabelIDs := n.labelIdentityInterface.AddSelector(newClusterSetScopeSelector, internalNetworkPolicyKeyFunc(np))
 		for _, id := range matchedLabelIDs {
-			uniqueLabelIDs[id] = struct{}{}
+			uniqueLabelIDs.Insert(id)
 		}
 	}
 	for id := range uniqueLabelIDs {
@@ -681,8 +737,8 @@ func splitPeersByScope(rule crdv1beta1.Rule, dir controlplane.Direction) ([]crdv
 // getAffectedNamespacesForAppliedTo computes the Namespaces currently affected by the appliedTo
 // Namespace selectors, and returns these Namespaces along with their labels.
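As an aside before the next helper: a minimal sketch (again not patch code, assuming "," as the separator) of the step convertSameLabelsToSelector above performs, turning a group key back into a matchLabels selector. Because every group key ends with the separator, strings.Split always yields len(labelKeys)+1 entries, so the indexed access is in range:

package main

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const sep = ","

// sameLabelsToSelector rebuilds a matchLabels selector from the sameLabels
// keys and one group key produced by the grouping step.
func sameLabelsToSelector(labelKeys []string, groupKey string) *metav1.LabelSelector {
	values := strings.Split(groupKey, sep) // "web,production," -> ["web", "production", ""]
	matchLabels := map[string]string{}
	for i, k := range labelKeys {
		matchLabels[k] = values[i]
	}
	return &metav1.LabelSelector{MatchLabels: matchLabels}
}

func main() {
	sel := sameLabelsToSelector([]string{"app", "tier"}, "web,production,")
	fmt.Println(sel.MatchLabels) // map[app:web tier:production]
}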
-func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo crdv1beta1.AppliedTo) map[string]map[string]string { - affectedNSAndLabels := map[string]map[string]string{} +func (n *NetworkPolicyController) getAffectedNamespacesForAppliedTo(appliedTo crdv1beta1.AppliedTo) map[string]labels.Set { + affectedNSAndLabels := map[string]labels.Set{} nsLabelSelector := appliedTo.NamespaceSelector if appliedTo.Group != "" { diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go index 036df95358d..fb8a250c741 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "antrea.io/antrea/multicluster/controllers/multicluster/common" @@ -1957,9 +1958,9 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { } } -func TestAddCNP(t *testing.T) { +func TestAddACNP(t *testing.T) { _, npc := newController(nil, nil) - cnp := getCNP() + cnp := getACNP() npc.addCNP(cnp) require.Equal(t, 1, npc.internalNetworkPolicyQueue.Len()) key, done := npc.internalNetworkPolicyQueue.Get() @@ -1968,9 +1969,9 @@ func TestAddCNP(t *testing.T) { assert.False(t, done) } -func TestUpdateCNP(t *testing.T) { +func TestUpdateACNP(t *testing.T) { _, npc := newController(nil, nil) - cnp := getCNP() + cnp := getACNP() newCNP := cnp.DeepCopy() // Make a change to the CNP. newCNP.Annotations = map[string]string{"foo": "bar"} @@ -1982,9 +1983,9 @@ func TestUpdateCNP(t *testing.T) { assert.False(t, done) } -func TestDeleteCNP(t *testing.T) { +func TestDeleteACNP(t *testing.T) { _, npc := newController(nil, nil) - cnp := getCNP() + cnp := getACNP() npc.deleteCNP(cnp) require.Equal(t, 1, npc.internalNetworkPolicyQueue.Len()) key, done := npc.internalNetworkPolicyQueue.Get() @@ -2218,7 +2219,7 @@ func TestProcessRefGroupOrClusterGroup(t *testing.T) { // util functions for testing. 
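The test added below exercises getACNPsWithRulesMatchingAnyUpdatedLabels. As a standalone illustration of the key computation it relies on (a hedged sketch, not patch code): a key counts as updated if it was added, removed, or had its value changed between the old and new Namespace labels:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// updatedLabelKeys returns every key that was added, removed, or had its value
// changed between the old and new label sets, mirroring the first half of
// getACNPsWithRulesMatchingAnyUpdatedLabels.
func updatedLabelKeys(oldLabels, newLabels map[string]string) sets.Set[string] {
	updated := sets.New[string]()
	for k, v := range oldLabels {
		if v2, ok := newLabels[k]; !ok || v2 != v {
			updated.Insert(k) // removed, or value changed
		}
	}
	for k := range newLabels {
		if _, ok := oldLabels[k]; !ok {
			updated.Insert(k) // newly added
		}
	}
	return updated
}

func main() {
	old := map[string]string{"kubernetes.io/metadata.name": "ns1", "team": "a"}
	cur := map[string]string{"kubernetes.io/metadata.name": "ns1", "tier": "production"}
	fmt.Println(sets.List(updatedLabelKeys(old, cur))) // [team tier]
}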
-func getCNP() *crdv1beta1.ClusterNetworkPolicy {
+func getACNP() *crdv1beta1.ClusterNetworkPolicy {
 	p10 := float64(10)
 	allowAction := crdv1beta1.RuleActionAllow
 	selectorA := metav1.LabelSelector{MatchLabels: map[string]string{"foo1": "bar1"}}
@@ -2372,3 +2373,98 @@ func TestFilterPerNamespaceRuleACNPsByNSLabels(t *testing.T) {
 		})
 	}
 }
+
+func TestGetACNPsWithRulesMatchingLabelKeysAcrossNSUpdate(t *testing.T) {
+	acnp1 := &crdv1beta1.ClusterNetworkPolicy{
+		ObjectMeta: metav1.ObjectMeta{Name: "acnp-with-tier-label-rule"},
+		Spec: crdv1beta1.ClusterNetworkPolicySpec{
+			AppliedTo: []crdv1beta1.AppliedTo{
+				{
+					NamespaceSelector: &metav1.LabelSelector{},
+				},
+			},
+			Ingress: []crdv1beta1.Rule{
+				{
+					From: []crdv1beta1.NetworkPolicyPeer{
+						{
+							Namespaces: &crdv1beta1.PeerNamespaces{
+								SameLabels: []string{"tier"},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	acnp2 := &crdv1beta1.ClusterNetworkPolicy{
+		ObjectMeta: metav1.ObjectMeta{Name: "acnp-with-tier-and-purpose-label-rule"},
+		Spec: crdv1beta1.ClusterNetworkPolicySpec{
+			AppliedTo: []crdv1beta1.AppliedTo{
+				{
+					NamespaceSelector: &metav1.LabelSelector{},
+				},
+			},
+			Ingress: []crdv1beta1.Rule{
+				{
+					From: []crdv1beta1.NetworkPolicyPeer{
+						{
+							Namespaces: &crdv1beta1.PeerNamespaces{
+								SameLabels: []string{"tier", "purpose"},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	tests := []struct {
+		name        string
+		oldNSLabels labels.Set
+		newNSLabels labels.Set
+		want        sets.Set[string]
+	}{
+		{
+			name: "Namespace updated to have tier label",
+			oldNSLabels: map[string]string{
+				"kubernetes.io/metadata.name": "ns1",
+			},
+			newNSLabels: map[string]string{
+				"kubernetes.io/metadata.name": "ns1",
+				"tier":                        "production",
+			},
+			want: sets.New[string](acnp1.Name, acnp2.Name),
+		},
+		{
+			name: "Namespace updated to have purpose label",
+			oldNSLabels: map[string]string{
+				"kubernetes.io/metadata.name": "ns2",
+			},
+			newNSLabels: map[string]string{
+				"kubernetes.io/metadata.name": "ns2",
+				"purpose":                     "test",
+			},
+			want: sets.New[string](acnp2.Name),
+		},
+		{
+			name: "Namespace updated for irrelevant label",
+			oldNSLabels: map[string]string{
+				"kubernetes.io/metadata.name": "ns3",
+				"tier":                        "production",
+			},
+			newNSLabels: map[string]string{
+				"kubernetes.io/metadata.name": "ns3",
+				"tier":                        "production",
+				"owned-by":                    "dev-team",
+			},
+			want: sets.New[string](),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, c := newController(nil, []runtime.Object{acnp1, acnp2})
+			c.acnpStore.Add(acnp1)
+			c.acnpStore.Add(acnp2)
+			assert.Equal(t, tt.want, c.getACNPsWithRulesMatchingAnyUpdatedLabels(tt.oldNSLabels, tt.newNSLabels))
+		})
+	}
+}
diff --git a/pkg/controller/networkpolicy/networkpolicy_controller.go b/pkg/controller/networkpolicy/networkpolicy_controller.go
index f083cf54fb6..ca5458900e2 100644
--- a/pkg/controller/networkpolicy/networkpolicy_controller.go
+++ b/pkg/controller/networkpolicy/networkpolicy_controller.go
@@ -93,9 +93,9 @@ const (
 	addressGroupType grouping.GroupType = "addressGroup"
 	internalGroupType grouping.GroupType = "internalGroup"
 
-	perNamespaceRuleIndex   = "hasPerNamespaceRule"
-	namespaceLabelRuleIndex = "namespaceRuleLabelKeys"
-	hasSuchRule             = "true"
+	perNamespaceRuleIndex      = "hasPerNamespaceRule"
+	namespaceRuleLabelKeyIndex = "namespaceRuleLabelKeys"
+	indexValueTrue             = "true"
 )
 
 var (
@@ -334,13 +334,13 @@ var acnpIndexers = cache.Indexers{
 		if !ok {
 			return []string{}, nil
 		}
-		if hasPerNSRule := hasPerNamespaceRule(acnp); hasPerNSRule {
-			return []string{hasSuchRule}, nil
+		if hasPerNamespaceRule(acnp) {
+			return
[]string{indexValueTrue}, nil } return []string{}, nil }, - namespaceLabelRuleIndex: func(obj interface{}) ([]string, error) { - cnp, ok := obj.(*secv1alpha1.ClusterNetworkPolicy) + namespaceRuleLabelKeyIndex: func(obj interface{}) ([]string, error) { + cnp, ok := obj.(*secv1beta1.ClusterNetworkPolicy) if !ok { return []string{}, nil } diff --git a/pkg/controller/networkpolicy/validate.go b/pkg/controller/networkpolicy/validate.go index c03919eb0ee..5639866e452 100644 --- a/pkg/controller/networkpolicy/validate.go +++ b/pkg/controller/networkpolicy/validate.go @@ -656,8 +656,15 @@ func (v *antreaPolicyValidator) validatePeers(ingress, egress []crdv1beta1.Rule) if peer.NamespaceSelector != nil && peer.Namespaces != nil { return "namespaces and namespaceSelector cannot be set at the same time for a single NetworkPolicyPeer", false } - if peer.Namespaces != nil && numFieldsSetInStruct(*peer.Namespaces) > 1 { - return "only one matching criteria can be specified in a single peer namespaces field", false + if peer.Namespaces != nil { + if numFieldsSetInStruct(*peer.Namespaces) > 1 { + return "only one matching criteria can be specified in a single peer namespaces field", false + } + for _, k := range peer.Namespaces.SameLabels { + if err := validation.IsQualifiedName(k); err != nil { + return fmt.Sprintf("Invalid label key in sameLabels rule: %s", k), false + } + } } peerFieldsNum := numFieldsSetInStruct(peer) if peer.Group != "" && peerFieldsNum > 1 { diff --git a/pkg/controller/networkpolicy/validate_test.go b/pkg/controller/networkpolicy/validate_test.go index 43b20fe13ca..271da1e06c1 100644 --- a/pkg/controller/networkpolicy/validate_test.go +++ b/pkg/controller/networkpolicy/validate_test.go @@ -703,6 +703,65 @@ func TestValidateAntreaClusterNetworkPolicy(t *testing.T) { operation: admv1.Create, expectedReason: "namespaces and namespaceSelector cannot be set at the same time for a single NetworkPolicyPeer", }, + { + name: "acnp-double-peer-namespace-field", + policy: &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-double-peer-namespace-field", + }, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Ingress: []crdv1beta1.Rule{ + { + Action: &allowAction, + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + Match: crdv1beta1.NamespaceMatchSelf, + SameLabels: []string{"test"}, + }, + }, + }, + }, + }, + }, + }, + operation: admv1.Create, + expectedReason: "only one matching criteria can be specified in a single peer namespaces field", + }, + { + name: "acnp-invalid-rule-samelabels-key", + policy: &crdv1beta1.ClusterNetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acnp-invalid-rule-samelabels-key", + }, + Spec: crdv1beta1.ClusterNetworkPolicySpec{ + AppliedTo: []crdv1beta1.AppliedTo{ + { + NamespaceSelector: &metav1.LabelSelector{}, + }, + }, + Ingress: []crdv1beta1.Rule{ + { + Action: &allowAction, + From: []crdv1beta1.NetworkPolicyPeer{ + { + Namespaces: &crdv1beta1.PeerNamespaces{ + SameLabels: []string{"&illegalKey"}, + }, + }, + }, + }, + }, + }, + }, + operation: admv1.Update, + expectedReason: "Invalid label key in sameLabels rule: &illegalKey", + }, { name: "acnp-toservice-set-with-to", policy: &crdv1beta1.ClusterNetworkPolicy{ diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index e083576273b..47c210953df 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -249,7 
+249,7 @@ func testMutateACNPNoTier(t *testing.T) { func testMutateANNPNoTier(t *testing.T) { invalidNpErr := fmt.Errorf("ANNP tier not mutated to default tier") builder := &AntreaNetworkPolicySpecBuilder{} - builder = builder.SetName(getNS("x"), "anp-no-tier"). + builder = builder.SetName(getNS("x"), "annp-no-tier"). SetAppliedToGroup([]ANNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(10.0) annp := builder.Get() @@ -1748,24 +1748,24 @@ func testACNPPriorityOverride(t *testing.T) { SetPriority(1.001). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Highest priority. Drops traffic from z/b to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority2"). SetPriority(1.002). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-priority3"). SetPriority(1.003). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority. Drops traffic from z to x. - builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) @@ -1819,8 +1819,8 @@ func testACNPTierOverride(t *testing.T) { SetPriority(100). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Highest priority tier. Drops traffic from z/b to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-securityops"). @@ -1828,8 +1828,8 @@ func testACNPTierOverride(t *testing.T) { SetPriority(10). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-tier-application"). @@ -1837,8 +1837,8 @@ func testACNPTierOverride(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from z to x. - builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) @@ -1899,8 +1899,8 @@ func testACNPCustomTiers(t *testing.T) { SetPriority(100). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from z to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-low"). @@ -1908,8 +1908,8 @@ func testACNPCustomTiers(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from z to x. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityTwoACNPs.Expect(getPod("z", "a"), getPod("x", "b"), Dropped) @@ -1944,8 +1944,8 @@ func testACNPPriorityConflictingRule(t *testing.T) { builder1 = builder1.SetName("acnp-drop"). SetPriority(1). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow"). @@ -1953,8 +1953,8 @@ func testACNPPriorityConflictingRule(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) reachabilityBothACNP.ExpectEgressToNamespace(getPod("z", "a"), getNS("x"), Dropped) @@ -1983,22 +1983,22 @@ func testACNPRulePriority(t *testing.T) { builder1 = builder1.SetName("acnp-deny"). SetPriority(5). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-allow - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} // acnp-allow will also apply to all pods in namespace x builder2 = builder2.SetName("acnp-allow"). SetPriority(5). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) - builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-drop - builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) // Only egress from pods in namespace x to namespace y should be denied reachabilityBothACNP := NewReachability(allPods, Connected) @@ -2026,8 +2026,8 @@ func testACNPPortRange(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-egress-port-range"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) @@ -2056,8 +2056,8 @@ func testACNPRejectEgress(t *testing.T) { builder = builder.SetName("acnp-reject-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Rejected) @@ -2085,8 +2085,8 @@ func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-reject-a-from-z-ingress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectIngressFromNamespace(getPod("x", "a"), getNS("z"), Rejected) @@ -2574,8 +2574,8 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { builder = builder.SetName(npName). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) builder.AddEgressLogging(logLabel) npRef := fmt.Sprintf("AntreaClusterNetworkPolicy:%s", npName) @@ -2747,7 +2747,7 @@ func testAppliedToPerRule(t *testing.T) { annpATGrp2 := ANNPAppliedToSpec{PodSelector: map[string]string{"pod": "b"}, PodSelectorMatchExp: nil} builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, nil, nil, nil, []ANNPAppliedToSpec{annpATGrp1}, crdv1beta1.RuleActionDrop, "", "") - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, nil, nil, nil, nil, []ANNPAppliedToSpec{annpATGrp2}, crdv1beta1.RuleActionDrop, "", "") reachability := NewReachability(allPods, Connected) @@ -2769,10 +2769,10 @@ func testAppliedToPerRule(t *testing.T) { cnpATGrp2 := ACNPAppliedToSpec{ PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": getNS("y")}, PodSelectorMatchExp: nil, NSSelectorMatchExp: nil} - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("x")}, - nil, nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("z")}, - nil, nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("x")}, + nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("z")}, + nil, nil, nil, nil, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) reachability2.Expect(getPod("x", "b"), getPod("x", "a"), Dropped) @@ -2857,8 +2857,8 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builderUpdated := &ClusterNetworkPolicySpecBuilder{} builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0) builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) - builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": getNS("y")}, - nil, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": getNS("y")}, + 
nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. testStep3 := &TestStep{ @@ -3049,7 +3049,7 @@ func testACNPNamespaceIsolation(t *testing.T) { // deny ingress traffic except from own namespace, which is always allowed. builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, selfNamespace, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Dropped) @@ -3068,7 +3068,7 @@ func testACNPNamespaceIsolation(t *testing.T) { SetPriority(1.0) builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, selfNamespace, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionAllow, "", "", nil) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) @@ -3101,7 +3101,7 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, selfNamespace, nil, crdv1beta1.RuleActionPass, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // deny ingress traffic except from own namespace, which is delegated to Namespace owners (who can create K8s // NetworkPolicies to regulate intra-Namespace traffic) @@ -3148,7 +3148,7 @@ func testACNPStrictNamespacesIsolationByLabels(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, samePurposeTierLabels, nil, crdv1beta1.RuleActionPass, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // prod1 and prod2 Namespaces should be able to connect to each other. The same goes for dev1 and // dev2 Namespaces. 
However, any prod Namespace should not be able to connect to any dev Namespace @@ -3192,7 +3192,7 @@ func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T, data *TestData SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, samePurposeTierLabels, nil, crdv1beta1.RuleActionPass, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Namespaces are split into two logical groups, purpose=test (prod1,2 and dev1,2) and purpose=test-exclusion // (no-tier). The two groups of Namespace should not be able to connect to each other. @@ -3257,7 +3257,7 @@ func testACNPStrictNamespacesIsolationBySingleLabel(t *testing.T, data *TestData func testFQDNPolicy(t *testing.T) { // The ipv6-only test env doesn't have IPv6 access to the web. skipIfNotIPv4Cluster(t) - // It is convenient to have higher log verbosity for FQDNtests for troubleshooting failures. + // It is convenient to have higher log verbosity for FQDN tests for troubleshooting failures. logLevel := log.GetLevel() log.SetLevel(log.TraceLevel) defer log.SetLevel(logLevel) @@ -3282,31 +3282,31 @@ func testFQDNPolicy(t *testing.T) { // All client Pods below are randomly chosen from test Namespaces. testcases := []podToAddrTestStep{ { - Pod(getNS("x") + "/a"), + getPod("x", "a"), "docs.github.com", 80, Rejected, }, { - Pod(getNS("x") + "/b"), + getPod("x", "b"), "api.github.com", 80, Rejected, }, { - Pod(getNS("y") + "/a"), + getPod("y", "a"), "wayfair.com", 80, Dropped, }, { - Pod(getNS("y") + "/b"), + getPod("y", "b"), "stackoverflow.com", 80, Dropped, }, { - Pod(getNS("z") + "/a"), + getPod("z", "a"), "facebook.com", 80, Connected, @@ -3382,7 +3382,7 @@ func testFQDNPolicyInClusterService(t *testing.T) { for _, service := range services { eachServiceCases := []podToAddrTestStep{ { - Pod(getNS("y") + "/b"), + getPod("y", "b"), // To indicate the server Name is a FQDN, end it with a dot. Then DNS resolver won't attempt to append // domain names (e.g. svc.cluster.local, cluster.local) when resolving it, making it get resolution // result more quickly. @@ -3391,13 +3391,13 @@ func testFQDNPolicyInClusterService(t *testing.T) { Rejected, }, { - Pod(getNS("z") + "/c"), + getPod("z", "c"), svcDNSName(service) + ".", 80, Dropped, }, { - Pod(getNS("x") + "/c"), + getPod("x", "c"), svcDNSName(service) + ".", 80, Connected, @@ -3428,7 +3428,7 @@ func testFQDNPolicyInClusterService(t *testing.T) { func testFQDNPolicyTCP(t *testing.T) { // The ipv6-only test env doesn't have IPv6 access to the web. skipIfNotIPv4Cluster(t) - // It is convenient to have higher log verbosity for FQDNtests for troubleshooting failures. + // It is convenient to have higher log verbosity for FQDN tests for troubleshooting failures. 
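Stepping back from the test plumbing for a moment: the strict-isolation tests above build their policies through ClusterNetworkPolicySpecBuilder. Written out as a plain crdv1beta1 literal (a sketch following the fixture style used earlier in this patch, not code from the patch itself; the Priority value is illustrative), the intent is: Pass traffic between Namespaces that share the same "tier" and "purpose" label values, then Drop everything else:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
)

func strictIsolationACNP() *crdv1beta1.ClusterNetworkPolicy {
	pass := crdv1beta1.RuleActionPass
	drop := crdv1beta1.RuleActionDrop
	return &crdv1beta1.ClusterNetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "strict-ns-isolation-by-labels"},
		Spec: crdv1beta1.ClusterNetworkPolicySpec{
			Priority:  1.0, // illustrative
			AppliedTo: []crdv1beta1.AppliedTo{{NamespaceSelector: &metav1.LabelSelector{}}},
			Ingress: []crdv1beta1.Rule{
				{
					// Same "tier" and "purpose" values: Pass, i.e. delegate to
					// lower-priority policies or K8s NetworkPolicies.
					Action: &pass,
					From: []crdv1beta1.NetworkPolicyPeer{{
						Namespaces: &crdv1beta1.PeerNamespaces{SameLabels: []string{"tier", "purpose"}},
					}},
				},
				{
					// Everything else, from any Namespace, is dropped.
					Action: &drop,
					From: []crdv1beta1.NetworkPolicyPeer{{
						NamespaceSelector: &metav1.LabelSelector{},
					}},
				},
			},
		},
	}
}

func main() { _ = strictIsolationACNP() }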
logLevel := log.GetLevel() log.SetLevel(log.TraceLevel) defer log.SetLevel(logLevel) @@ -3646,13 +3646,13 @@ func testACNPNodeSelectorEgress(t *testing.T) { if clusterInfo.podV4NetworkCIDR != "" { ipv4Testcases := []podToAddrTestStep{ { - Pod(getNS("x") + "/a"), + getPod("x", "a"), controlPlaneNodeIPv4(), 6443, Dropped, }, { - Pod(getNS("x") + "/b"), + getPod("x", "b"), controlPlaneNodeIPv4(), 6443, Connected, @@ -3664,13 +3664,13 @@ func testACNPNodeSelectorEgress(t *testing.T) { if clusterInfo.podV6NetworkCIDR != "" { ipv6Testcases := []podToAddrTestStep{ { - Pod(getNS("x") + "/a"), + getPod("x", "a"), controlPlaneNodeIPv6(), 6443, Dropped, }, { - Pod(getNS("x") + "/b"), + getPod("x", "b"), controlPlaneNodeIPv6(), 6443, Connected, diff --git a/test/e2e/nodenetworkpolicy_test.go b/test/e2e/nodenetworkpolicy_test.go index be9cb945ecd..592556d962c 100644 --- a/test/e2e/nodenetworkpolicy_test.go +++ b/test/e2e/nodenetworkpolicy_test.go @@ -37,10 +37,7 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo p8082 = 8082 p8085 = 8085 pods = []string{"a"} - suffix := randName("") - namespaces = make(map[string]string) - namespaces["x"] = "x-" + suffix - namespaces["y"] = "y-" + suffix + namespaces = initNamespaceMeta(formFactorNormal) nodes = make(map[string]string) nodes["x"] = controlPlaneNodeName() nodes["y"] = workerNodeName(1) @@ -50,7 +47,6 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo hostNetworks["y"] = true } else { hostNetworks["y"] = false - namespaces["z"] = "z-" + suffix nodes["z"] = workerNodeName(1) hostNetworks["z"] = false } @@ -58,7 +54,7 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo for _, podName := range pods { for _, ns := range namespaces { - allPods = append(allPods, NewPod(ns, podName)) + allPods = append(allPods, NewPod(ns.Name, podName)) } } @@ -139,25 +135,23 @@ func testNodeACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProt SetPriority(1.1). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder1.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow-x-to-y-egress"). SetPriority(1.1). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder2.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 81", - reachability, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{81}, - protocol, - 0, - nil, + Name: "Port 81", + Reachability: reachability, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{81}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -189,19 +183,17 @@ func testNodeACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
 	builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
-		nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+		nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil)
 
 	reachability := NewReachability(allPods, Connected)
-	reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+	reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped)
 	testStep := []*TestStep{
 		{
-			"Port 80",
-			reachability,
-			[]metav1.Object{builder.Get()},
-			[]int32{80},
-			protocol,
-			0,
-			nil,
+			Name:          "Port 80",
+			Reachability:  reachability,
+			TestResources: []metav1.Object{builder.Get()},
+			Ports:         []int32{80},
+			Protocol:      protocol,
 		},
 	}
 	testCase := []*TestCase{
@@ -225,19 +217,17 @@ func testNodeACNPDropIngress(t *testing.T, protocol AntreaPolicyProtocol) {
 		SetPriority(1.0).
 		SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
 	builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
-		nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+		nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil)
 
 	reachability := NewReachability(allPods, Connected)
-	reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+	reachability.Expect(getPod("y", "a"), getPod("x", "a"), Dropped)
 	testStep := []*TestStep{
 		{
-			"Port 80",
-			reachability,
-			[]metav1.Object{builder.Get()},
-			[]int32{80},
-			protocol,
-			0,
-			nil,
+			Name:          "Port 80",
+			Reachability:  reachability,
+			TestResources: []metav1.Object{builder.Get()},
+			Ports:         []int32{80},
+			Protocol:      protocol,
 		},
 	}
 	testCase := []*TestCase{
@@ -253,19 +243,17 @@ func testNodeACNPPortRange(t *testing.T) {
 		SetPriority(1.0).
 		SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
 	builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
-		nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)
+		nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)
 
 	reachability := NewReachability(allPods, Connected)
-	reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+	reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped)
 	testSteps := []*TestStep{
 		{
-			fmt.Sprintf("ACNP Drop Ports 8080:8082"),
-			reachability,
-			[]metav1.Object{builder.Get()},
-			[]int32{8080, 8081, 8082},
-			ProtocolTCP,
-			0,
-			nil,
+			Name:          "ACNP Drop Ports 8080:8082",
+			Reachability:  reachability,
+			TestResources: []metav1.Object{builder.Get()},
+			Ports:         []int32{8080, 8081, 8082},
+			Protocol:      ProtocolTCP,
 		},
 	}
 
@@ -280,7 +268,7 @@
 // This test retrieves the port range from the client Pod and uses it in sourcePort and sourceEndPort of an ACNP rule to
 // verify that packets can be matched by source port.
 func testNodeACNPSourcePort(t *testing.T) {
-	portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(namespaces["x"], "a")
+	portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(getNS("x"), "a")
 	failOnError(err, t)
 	builder := &ClusterNetworkPolicySpecBuilder{}
 	builder = builder.SetName("acnp-source-port").
@@ -304,37 +292,31 @@ func testNodeACNPSourcePort(t *testing.T) { nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachability.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) // After adding the dst port constraint of port 80, traffic on port 81 should not be affected. updatedReachability := NewReachability(allPods, Connected) testSteps := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 81", - updatedReachability, - []metav1.Object{builder2.Get()}, - []int32{81}, - ProtocolTCP, - 0, - nil, + Name: "Port 81", + Reachability: updatedReachability, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{81}, + Protocol: ProtocolTCP, }, { - "Port range 80-81", - reachability, - []metav1.Object{builder3.Get()}, - []int32{80, 81}, - ProtocolTCP, - 0, - nil, + Name: "Port range 80-81", + Reachability: reachability, + TestResources: []metav1.Object{builder3.Get()}, + Ports: []int32{80, 81}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -367,7 +349,7 @@ func testNodeACNPRejectEgress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -376,16 +358,14 @@ func testNodeACNPRejectEgress(t *testing.T, protocol AntreaPolicyProtocol) { if protocol == ProtocolSCTP { expectedResult = Dropped } - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), expectedResult) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), expectedResult) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -401,19 +381,17 @@ func testNodeACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Rejected) + reachability.Expect(getPod("y", "a"), getPod("x", "a"), Rejected) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get()}, - []int32{80}, - protocol, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: protocol, }, } testCase := []*TestCase{ @@ -429,31 +407,27 @@ func testNodeACNPNoEffectOnOtherProtocols(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) - reachability1.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachability1.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) reachability2 := NewReachability(allPods, Connected) testStep := []*TestStep{ { - "Port 80", - reachability1, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability1, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80", - reachability2, - []metav1.Object{builder.Get()}, - []int32{80}, - ProtocolUDP, - 0, - nil, + Name: "Port 80", + Reachability: reachability2, + TestResources: []metav1.Object{builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolUDP, }, } testCase := []*TestCase{ @@ -471,7 +445,7 @@ func testNodeACNPPriorityOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Highest priority. Drops traffic from y to x. builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority2"). @@ -479,7 +453,7 @@ func testNodeACNPPriorityOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Medium priority. Allows traffic from y to x. builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-priority3"). @@ -487,34 +461,30 @@ func testNodeACNPPriorityOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Lowest priority. 
Drops traffic from y to x. builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityAllACNPs.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies with different priorities", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies with different priorities", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } // Create the Policies in specific order to make sure that priority re-assignments work as expected. testStepAll := []*TestStep{ { - "All three Policies", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -534,51 +504,47 @@ func testNodeACNPTierOverride(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Highest priority tier. Drops traffic from y to x. builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-securityops"). SetTier("securityops"). SetPriority(10). - SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": getNS("x")}}}) // Medium priority tier. Allows traffic from y to x. builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-tier-application"). SetTier("application"). SetPriority(1). - SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": getNS("x")}}}) // Lowest priority tier. Drops traffic from y to x. 
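Before the rule is added below, a toy illustration of the precedence these tier tests rely on (an assumption-laden sketch: the numeric tier priorities are illustrative, not Antrea's actual internal values): tier priority is compared first, then policy priority within the tier, with lower numbers winning:

package main

import (
	"fmt"
	"sort"
)

type policy struct {
	name         string
	tierPriority int32   // illustrative: emergency < securityops < application
	priority     float64 // policy priority within the tier
}

func main() {
	policies := []policy{
		{"acnp-tier-application", 250, 1},
		{"acnp-tier-securityops", 100, 10},
		{"acnp-tier-emergency", 50, 100},
	}
	sort.Slice(policies, func(i, j int) bool {
		if policies[i].tierPriority != policies[j].tierPriority {
			return policies[i].tierPriority < policies[j].tierPriority
		}
		return policies[i].priority < policies[j].priority
	})
	// The emergency-tier rule wins despite its numerically largest policy
	// priority, matching the reachability the tier-override tests expect.
	fmt.Println(policies[0].name) // acnp-tier-emergency
}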
builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityAllACNPs := NewReachability(allPods, Connected) - reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityAllACNPs.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder3.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder3.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testStepAll := []*TestStep{ { - "All three Policies in different tiers", - reachabilityAllACNPs, - []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "All three Policies in different tiers", + Reachability: reachabilityAllACNPs, + TestResources: []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -606,7 +572,7 @@ func testNodeACNPCustomTiers(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Medium priority tier. Allows traffic from y to x. builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-low"). @@ -615,32 +581,28 @@ func testNodeACNPCustomTiers(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) // Lowest priority tier. Drops traffic from y to x. 
builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityOneACNP := NewReachability(allPods, Connected) - reachabilityOneACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityOneACNP.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStepOneACNP := []*TestStep{ { - "One Policy", - reachabilityOneACNP, - []metav1.Object{builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "One Policy", + Reachability: reachabilityOneACNP, + TestResources: []metav1.Object{builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } reachabilityTwoACNPs := NewReachability(allPods, Connected) testStepTwoACNP := []*TestStep{ { - "Two Policies in different tiers", - reachabilityTwoACNPs, - []metav1.Object{builder2.Get(), builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Two Policies in different tiers", + Reachability: reachabilityTwoACNPs, + TestResources: []metav1.Object{builder2.Get(), builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -663,7 +625,7 @@ func testNodeACNPPriorityConflictingRule(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow"). @@ -672,19 +634,17 @@ func testNodeACNPPriorityConflictingRule(t *testing.T) { // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) - reachabilityBothACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + reachabilityBothACNP.Expect(getPod("y", "a"), getPod("x", "a"), Dropped) testStep := []*TestStep{ { - "Both ACNP", - reachabilityBothACNP, - []metav1.Object{builder1.Get(), builder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Both ACNP", + Reachability: reachabilityBothACNP, + TestResources: []metav1.Object{builder1.Get(), builder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -699,19 +659,17 @@ func testNodeACNPNamespaceIsolation(t *testing.T) { SetTier("baseline"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) - builder1.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": getNS("y")}, nil, nil, nil, + nil, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) - reachability1.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + reachability1.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability1, - []metav1.Object{builder1.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability1, + TestResources: []metav1.Object{builder1.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ @@ -723,40 +681,36 @@ func testNodeACNPNamespaceIsolation(t *testing.T) { func testNodeACNPClusterGroupUpdate(t *testing.T) { cgName := "cg-ns-z-then-y" cgBuilder := &ClusterGroupSpecBuilder{} - cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) // Update CG NS selector to group Pods from Namespace Y updatedCgBuilder := &ClusterGroupSpecBuilder{} - updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) updatedReachability := NewReachability(allPods, Connected) - updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + updatedReachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{cgBuilder.Get(), builder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{cgBuilder.Get(), builder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, { - "Port 80 - update", - updatedReachability, - []metav1.Object{updatedCgBuilder.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 - update", + Reachability: updatedReachability, + TestResources: []metav1.Object{updatedCgBuilder.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -766,8 +720,8 @@ func testNodeACNPClusterGroupUpdate(t *testing.T) { } func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { - podYAIP, _ := podIPs[namespaces["y"]+"/a"] - podZAIP, _ := podIPs[namespaces["z"]+"/a"] + podYAIP, _ := podIPs[getNS("y")+"/a"] + podZAIP, _ := podIPs[getNS("z")+"/a"] // There are three situations of a Pod's IP(s): // 1. Only one IPv4 address. // 2. Only one IPv6 address. @@ -799,22 +753,20 @@ func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("z", "a"), Dropped) testStep := []*TestStep{ { - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, }, } testCase := []*TestCase{ @@ -826,7 +778,7 @@ func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { cg1Name := "cg-1" cgBuilder1 := &ClusterGroupSpecBuilder{} - cgBuilder1 = cgBuilder1.SetName(cg1Name).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetNamespaceSelector(map[string]string{"ns": getNS("y")}, nil) cgNestedName := "cg-nested" cgBuilderNested := &ClusterGroupSpecBuilder{} cgBuilderNested = cgBuilderNested.SetName(cgNestedName).SetChildGroups([]string{cg1Name}) @@ -835,35 +787,31 @@ func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) builder = builder.SetName("cnp-nested-cg").SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}). 
AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) + nil, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + reachability.ExpectEgressToNamespace(getPod("x", "a"), getNS("y"), Dropped) testStep1 := &TestStep{ - "Port 80", - reachability, + Name: "Port 80", + Reachability: reachability, // Note in this testcase the ClusterGroup is created after the ACNP - []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + TestResources: []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } cg2Name := "cg-2" cgBuilder2 := &ClusterGroupSpecBuilder{} - cgBuilder2 = cgBuilder2.SetName(cg2Name).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetNamespaceSelector(map[string]string{"ns": getNS("z")}, nil) cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg2Name}) reachability2 := NewReachability(allPods, Connected) - reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + reachability2.ExpectEgressToNamespace(getPod("x", "a"), getNS("z"), Dropped) testStep2 := &TestStep{ - "Port 80 updated", - reachability2, - []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80 updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testSteps := []*TestStep{testStep1, testStep2} @@ -874,8 +822,8 @@ func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) } func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { - podYAIP, _ := podIPs[namespaces["y"]+"/a"] - podZAIP, _ := podIPs[namespaces["z"]+"/a"] + podYAIP, _ := podIPs[getPodName("y", "a")] + podZAIP, _ := podIPs[getPodName("z", "a")] genCIDR := func(ip string) string { switch IPFamily(ip) { case "v4": @@ -905,33 +853,29 @@ func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) + nil, nil, nil, nil, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) reachability := NewReachability(allPods, Connected) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) - reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) + reachability.Expect(getPod("x", "a"), getPod("z", "a"), Dropped) testStep := &TestStep{ - "Port 80", - reachability, - []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80", + Reachability: reachability, + TestResources: []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } cgParent = cgParent.SetChildGroups([]string{cg1Name}) reachability2 := NewReachability(allPods, Connected) - reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability2.Expect(getPod("x", "a"), getPod("y", "a"), Dropped) testStep2 := &TestStep{ - "Port 80, updated", - reachability2, - []metav1.Object{cgParent.Get()}, - []int32{80}, - ProtocolTCP, - 0, - nil, + Name: "Port 80, updated", + Reachability: reachability2, + TestResources: []metav1.Object{cgParent.Get()}, + Ports: []int32{80}, + Protocol: ProtocolTCP, } testCase := []*TestCase{ From b8a8acee3c9dd610064fa542365eee07a5933273 Mon Sep 17 00:00:00 2001 From: Dyanngg Date: Thu, 7 Mar 2024 12:11:22 -0800 Subject: [PATCH 7/7] Address more comments Signed-off-by: Dyanngg --- multicluster/test/e2e/antreapolicy_test.go | 3 +- .../networkpolicy/clusternetworkpolicy.go | 6 +- .../clusternetworkpolicy_test.go | 51 +------ test/e2e/antreaipam_anp_test.go | 14 +- test/e2e/antreapolicy_test.go | 143 ++++++++---------- test/e2e/clustergroup_test.go | 2 +- test/e2e/group_test.go | 2 +- test/e2e/k8s_util.go | 40 +---- test/e2e/nodenetworkpolicy_test.go | 14 +- 9 files changed, 94 insertions(+), 181 deletions(-) diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go index 88df87af282..9bb2f5ad79b 100644 --- a/multicluster/test/e2e/antreapolicy_test.go +++ b/multicluster/test/e2e/antreapolicy_test.go @@ -54,8 +54,7 @@ func failOnError(err error, t *testing.T) { func initializeForPolicyTest(t *testing.T, data *MCTestData) { perNamespacePods = []string{"a", "b", "c"} perClusterNamespaces = make(map[string]antreae2e.TestNamespaceMeta) - nss := []string{"x", "y", "z"} - for _, ns := range nss { + for _, ns := range []string{"x", "y", "z"} { perClusterNamespaces[ns] = antreae2e.TestNamespaceMeta{Name: ns} } diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go index 23e3405879f..6e83f2a2f8c 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go @@ -16,6 +16,7 @@ package networkpolicy import ( "reflect" + "sort" "strings" v1 "k8s.io/api/core/v1" @@ -631,7 +632,9 @@ func groupNamespacesByLabelValue(affectedNSAndLabels map[string]labels.Set, labe func getLabelValues(labels map[string]string, labelKeys []string) string { key := "" for _, k := range labelKeys { - if v, ok := 
labels[k]; ok { + if v, ok := labels[k]; !ok { + return "" + } else { key += v + labelValueSeparator } } @@ -682,6 +685,7 @@ func (n *NetworkPolicyController) toAntreaPeerForSameLabelNamespaces(peer crdv1b LabelIdentities: labelIdentities, } var atgs []*antreatypes.AppliedToGroup + sort.Strings(namespacesByLabelValues) for _, ns := range namespacesByLabelValues { atgForNamespace, _ := atgPerAffectedNS[ns] atgs = append(atgs, atgForNamespace) diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go index fb8a250c741..3241723ec1a 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy_test.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy_test.go @@ -17,8 +17,6 @@ package networkpolicy import ( "fmt" "net" - "reflect" - "sort" "testing" "github.com/stretchr/testify/assert" @@ -36,48 +34,6 @@ import ( "antrea.io/antrea/pkg/util/k8s" ) -// ruleSemanticallyEqual compares two NetworkPolicyRule objects. It disregards -// the appliedToGroup slice element order as long as two rules' appliedToGroups -// have same elements. -func ruleSemanticallyEqual(a, b controlplane.NetworkPolicyRule) bool { - sort.Strings(a.AppliedToGroups) - sort.Strings(b.AppliedToGroups) - return reflect.DeepEqual(a, b) -} - -// diffNetworkPolicyRuleList checks if elements in two controlplane.NetworkPolicyRule -// slices are equal. If not, it returns the unmatched NetworkPolicyRules. -func diffNetworkPolicyRuleList(a, b []controlplane.NetworkPolicyRule) (extraA, extraB []controlplane.NetworkPolicyRule) { - if len(a) != len(b) { - return nil, nil - } - // Mark indexes in b that has already matched - visited := make([]bool, len(b)) - for i := 0; i < len(a); i++ { - found := false - for j := 0; j < len(b); j++ { - if visited[j] { - continue - } - if ruleSemanticallyEqual(a[i], b[j]) { - visited[j] = true - found = true - break - } - } - if !found { - extraA = append(extraA, a[i]) - } - } - for j := 0; j < len(b); j++ { - if visited[j] { - continue - } - extraB = append(extraB, b[j]) - } - return -} - func TestProcessClusterNetworkPolicy(t *testing.T) { p10 := float64(10) t10 := int32(10) @@ -1052,8 +1008,8 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { { Direction: controlplane.DirectionIn, AppliedToGroups: []string{ - getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), getNormalizedUID(antreatypes.NewGroupSelector("nsB", nil, nil, nil, nil).NormalizedName), + getNormalizedUID(antreatypes.NewGroupSelector("nsC", nil, nil, nil, nil).NormalizedName), }, From: controlplane.NetworkPolicyPeer{ AddressGroups: []string{ @@ -1947,10 +1903,7 @@ func TestProcessClusterNetworkPolicy(t *testing.T) { assert.Equal(t, tt.expectedPolicy.Priority, actualPolicy.Priority) assert.Equal(t, tt.expectedPolicy.TierPriority, actualPolicy.TierPriority) assert.Equal(t, tt.expectedPolicy.AppliedToPerRule, actualPolicy.AppliedToPerRule) - missingExpectedRules, extraActualRules := diffNetworkPolicyRuleList(tt.expectedPolicy.Rules, actualPolicy.Rules) - if len(missingExpectedRules) > 0 || len(extraActualRules) > 0 { - t.Errorf("Unexpected rules in processed policy. Missing expected rules: %v. 
Extra actual rules: %v", missingExpectedRules, extraActualRules) - } + assert.ElementsMatch(t, tt.expectedPolicy.Rules, actualPolicy.Rules) assert.ElementsMatch(t, tt.expectedPolicy.AppliedToGroups, actualPolicy.AppliedToGroups) assert.Equal(t, tt.expectedAppliedToGroups, len(actualAppliedToGroups)) assert.Equal(t, tt.expectedAddressGroups, len(actualAddressGroups)) diff --git a/test/e2e/antreaipam_anp_test.go b/test/e2e/antreaipam_anp_test.go index 7ee0504f413..7e6e191cca8 100644 --- a/test/e2e/antreaipam_anp_test.go +++ b/test/e2e/antreaipam_anp_test.go @@ -28,13 +28,7 @@ import ( // initializeAntreaIPAM must be called after Namespace in antreaIPAMNamespaces created func initializeAntreaIPAM(t *testing.T, data *TestData) { - p80 = 80 - p81 = 81 - p8080 = 8080 - p8081 = 8081 - p8082 = 8082 - p8085 = 8085 - pods = []string{"a", "b", "c"} + podsPerNamespace = []string{"a", "b", "c"} namespaces = make(map[string]TestNamespaceMeta) regularNamespaces := make(map[string]TestNamespaceMeta) suffix := randName("") @@ -50,7 +44,7 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { for _, ns := range antreaIPAMNamespaces { namespaces[ns] = TestNamespaceMeta{Name: ns} } - for _, podName := range pods { + for _, podName := range podsPerNamespace { for _, ns := range namespaces { allPods = append(allPods, NewPod(ns.Name, podName)) podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], NewPod(ns.Name, podName)) @@ -61,9 +55,9 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - _, err = k8sUtils.Bootstrap(regularNamespaces, pods, true, nil, nil) + _, err = k8sUtils.Bootstrap(regularNamespaces, podsPerNamespace, true, nil, nil) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, false, nil, nil) + ips, err := k8sUtils.Bootstrap(namespaces, podsPerNamespace, false, nil, nil) failOnError(err, t) podIPs = ips } diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index 47c210953df..ffcd5d0ca1e 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -45,16 +45,22 @@ import ( // common for all tests. var ( - allPods []Pod - podsByNamespace map[string][]Pod - k8sUtils *KubernetesUtils - allTestList []*TestCase - pods []string - namespaces map[string]TestNamespaceMeta - podIPs map[string][]string - p80, p81, p8080, p8081, p8082, p8085, p6443 int32 - nodes map[string]string - selfNamespace *crdv1beta1.PeerNamespaces + p80 int32 = 80 + p81 int32 = 81 + p6443 int32 = 6443 + p8080 int32 = 8080 + p8081 int32 = 8081 + p8082 int32 = 8082 + p8085 int32 = 8085 + allPods []Pod + podsByNamespace map[string][]Pod + k8sUtils *KubernetesUtils + allTestList []*TestCase + podsPerNamespace []string + namespaces map[string]TestNamespaceMeta + podIPs map[string][]string + nodes map[string]string + selfNamespace *crdv1beta1.PeerNamespaces ) const ( @@ -66,11 +72,9 @@ const ( // Verification of deleting/creating resources timed out. 
timeout = 10 * time.Second // audit log directory on Antrea Agent - logDir = "/var/log/antrea/networkpolicy/" - logfileName = "np.log" - defaultTierName = "application" - formFactorNormal = "3by3PodWorkloads" - formFactorLarge = "extraNamespaces" + logDir = "/var/log/antrea/networkpolicy/" + logfileName = "np.log" + defaultTierName = "application" ) func failOnError(err error, t *testing.T) { @@ -105,74 +109,28 @@ func getPodName(ns, po string) string { return namespaces[ns].Name + "/" + po } -// initNamespaceMeta populates the test Namespaces metadata. -// There are two form factors for test workload Namespaces: -// -// Normal: three Namespaces x, y, z. -// Large: two "prod" Namespaces labeled purpose=test and tier=prod. -// two "dev" Namespaces labeled purpose=test and tier=dev. -// one "no-tier-label" Namespace labeled purpose=test. -// -// The large form factor workloads are used for testcases where advanced -// Namespace matching in policies are required. -func initNamespaceMeta(formFactor string) map[string]TestNamespaceMeta { - allNamespaceMeta := make(map[string]TestNamespaceMeta) - suffix := randName("") - if formFactor == formFactorLarge { - for i := 1; i < 3; i++ { - prodNS := TestNamespaceMeta{ - Name: "prod" + strconv.Itoa(i) + "-" + suffix, - Labels: map[string]string{ - "purpose": "test", - "tier": "prod", - }, - } - allNamespaceMeta["prod"+strconv.Itoa(i)] = prodNS - devNS := TestNamespaceMeta{ - Name: "dev" + strconv.Itoa(i) + "-" + suffix, - Labels: map[string]string{ - "purpose": "test", - "tier": "dev", - }, - } - allNamespaceMeta["dev"+strconv.Itoa(i)] = devNS - } - allNamespaceMeta["no-tier"] = TestNamespaceMeta{ - Name: "no-tier-" + suffix, - Labels: map[string]string{ - "purpose": "test-exclusion", - }, - } - } else if formFactor == formFactorNormal { - nss := []string{"x", "y", "z"} - for _, ns := range nss { - allNamespaceMeta[ns] = TestNamespaceMeta{ +func initialize(t *testing.T, data *TestData, customNamespaces map[string]TestNamespaceMeta) { + selfNamespace = &crdv1beta1.PeerNamespaces{ + Match: crdv1beta1.NamespaceMatchSelf, + } + namespaces = make(map[string]TestNamespaceMeta) + if customNamespaces != nil { + namespaces = customNamespaces + } else { + suffix := randName("") + for _, ns := range []string{"x", "y", "z"} { + namespaces[ns] = TestNamespaceMeta{ Name: ns + "-" + suffix, } } } - return allNamespaceMeta -} - -func initialize(t *testing.T, data *TestData, formFactor string) { - p80 = 80 - p81 = 81 - p8080 = 8080 - p8081 = 8081 - p8082 = 8082 - p8085 = 8085 - selfNamespace = &crdv1beta1.PeerNamespaces{ - Match: crdv1beta1.NamespaceMatchSelf, - } - pods = []string{"a", "b", "c"} - namespaces = initNamespaceMeta(formFactor) // This function "initialize" will be used more than once, and variable "allPods" is global. // It should be empty every time when "initialize" is performed, otherwise there will be unexpected // results. 
allPods = []Pod{} podsByNamespace = make(map[string][]Pod) - - for _, podName := range pods { + podsPerNamespace = []string{"a", "b", "c"} + for _, podName := range podsPerNamespace { for _, ns := range namespaces { allPods = append(allPods, NewPod(ns.Name, podName)) podsByNamespace[ns.Name] = append(podsByNamespace[ns.Name], NewPod(ns.Name, podName)) @@ -184,7 +142,7 @@ func initialize(t *testing.T, data *TestData, formFactor string) { // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nil, nil) + ips, err := k8sUtils.Bootstrap(namespaces, podsPerNamespace, true, nil, nil) failOnError(err, t) podIPs = ips } @@ -4373,7 +4331,7 @@ func TestAntreaPolicy(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data, formFactorNormal) + initialize(t, data, nil) // This test group only provides one case for each CR, including ACNP, ANNP, Tier, // ClusterGroup and Group to make sure the corresponding validation webhooks is @@ -4514,7 +4472,36 @@ func TestAntreaPolicyExtendedNamespaces(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data, formFactorLarge) + extendedNamespaces := make(map[string]TestNamespaceMeta) + suffix := randName("") + // two "prod" Namespaces labeled purpose=test and tier=prod. + // two "dev" Namespaces labeled purpose=test and tier=dev. + // one "no-tier-label" Namespace labeled purpose=test. + for i := 1; i <= 2; i++ { + prodNS := TestNamespaceMeta{ + Name: "prod" + strconv.Itoa(i) + "-" + suffix, + Labels: map[string]string{ + "purpose": "test", + "tier": "prod", + }, + } + extendedNamespaces["prod"+strconv.Itoa(i)] = prodNS + devNS := TestNamespaceMeta{ + Name: "dev" + strconv.Itoa(i) + "-" + suffix, + Labels: map[string]string{ + "purpose": "test", + "tier": "dev", + }, + } + extendedNamespaces["dev"+strconv.Itoa(i)] = devNS + } + extendedNamespaces["no-tier"] = TestNamespaceMeta{ + Name: "no-tier-" + suffix, + Labels: map[string]string{ + "purpose": "test-exclusion", + }, + } + initialize(t, data, extendedNamespaces) t.Run("TestGroupACNPNamespaceLabelSelections", func(t *testing.T) { t.Run("Case=ACNPStrictNamespacesIsolationByLabels", func(t *testing.T) { testACNPStrictNamespacesIsolationByLabels(t) }) @@ -4658,7 +4645,7 @@ func TestAntreaPolicyStatusWithAppliedToUnsupportedGroup(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data, formFactorNormal) + initialize(t, data, nil) testNamespace := getNS("x") // Build a Group with namespaceSelector selecting namespaces outside testNamespace. 
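[Editor's note on the test refactor above: the patch replaces positional TestStep literals with named struct fields and drops the form-factor indirection in favor of callers passing their own Namespace metadata into initialize. Below is a minimal, self-contained sketch of those two patterns. The TestStep and TestNamespaceMeta definitions here are simplified stand-ins for the e2e framework's types (the real structs carry more fields such as Reachability and TestResources), so treat this as an illustration rather than the framework code.]

package main

import "fmt"

// Simplified stand-ins for the e2e framework types.
type TestNamespaceMeta struct {
	Name   string
	Labels map[string]string
}

type TestStep struct {
	Name     string
	Ports    []int32
	Protocol string
	Duration int // zero-valued fields can simply be omitted from literals
}

func main() {
	// Named fields make it obvious which value is which, and the trailing
	// zero-valued entries (the "0, nil" pairs in the old positional
	// literals) no longer need to be spelled out at all.
	step := TestStep{
		Name:     "Port 80",
		Ports:    []int32{80},
		Protocol: "TCP",
	}

	// Callers now describe their own test Namespaces up front, instead of
	// asking initialize() to pick a named "form factor".
	namespaces := map[string]TestNamespaceMeta{
		"prod1": {Name: "prod1-abc", Labels: map[string]string{"tier": "prod"}},
		"dev1":  {Name: "dev1-abc", Labels: map[string]string{"tier": "dev"}},
	}
	fmt.Println(step.Name, len(namespaces))
}

[This is also why each converted literal in the diff shrinks by two lines: omitting zero-valued named fields replaces the explicit "0, nil" tail of the positional form.]
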
diff --git a/test/e2e/clustergroup_test.go b/test/e2e/clustergroup_test.go index 9308cd7b32f..6576546b1ae 100644 --- a/test/e2e/clustergroup_test.go +++ b/test/e2e/clustergroup_test.go @@ -320,7 +320,7 @@ func TestClusterGroup(t *testing.T) { } defer teardownTest(t, data) - initialize(t, data, formFactorNormal) + initialize(t, data, nil) t.Run("TestGroupClusterGroupValidate", func(t *testing.T) { t.Run("Case=IPBlockWithPodSelectorDenied", func(t *testing.T) { testInvalidCGIPBlockWithPodSelector(t) }) diff --git a/test/e2e/group_test.go b/test/e2e/group_test.go index 487d647047d..121c1a46627 100644 --- a/test/e2e/group_test.go +++ b/test/e2e/group_test.go @@ -263,7 +263,7 @@ func TestGroup(t *testing.T) { t.Fatalf("Error when setting up test: %v", err) } defer teardownTest(t, data) - initialize(t, data, formFactorNormal) + initialize(t, data, nil) t.Run("TestGroupNamespacedGroupValidate", func(t *testing.T) { t.Run("Case=IPBlockWithPodSelectorDenied", func(t *testing.T) { testInvalidGroupIPBlockWithPodSelector(t) }) diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index 71038752779..7f1570822a6 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -840,35 +840,6 @@ func (k *KubernetesUtils) GetCG(name string) (*crdv1beta1.ClusterGroup, error) { return k.crdClient.CrdV1beta1().ClusterGroups().Get(context.TODO(), name, metav1.GetOptions{}) } -// CreateGroup is a convenience function for creating an Antrea Group by namespace, name and selector. -func (k *KubernetesUtils) CreateGroup(namespace, name string, pSelector, nSelector *metav1.LabelSelector, ipBlocks []crdv1beta1.IPBlock) (*crdv1beta1.Group, error) { - log.Infof("Creating group %s/%s", namespace, name) - _, err := k.crdClient.CrdV1alpha3().Groups(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - g := &crdv1beta1.Group{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - } - if pSelector != nil { - g.Spec.PodSelector = pSelector - } - if nSelector != nil { - g.Spec.NamespaceSelector = nSelector - } - if len(ipBlocks) > 0 { - g.Spec.IPBlocks = ipBlocks - } - g, err = k.crdClient.CrdV1beta1().Groups(namespace).Create(context.TODO(), g, metav1.CreateOptions{}) - if err != nil { - log.Debugf("Unable to create group %s/%s: %s", namespace, name, err) - } - return g, err - } - return nil, fmt.Errorf("group with name %s/%s already exists", namespace, name) -} - // GetGroup is a convenience function for getting Groups func (k *KubernetesUtils) GetGroup(namespace, name string) (*crdv1beta1.Group, error) { return k.crdClient.CrdV1beta1().Groups(namespace).Get(context.TODO(), name, metav1.GetOptions{}) @@ -1138,7 +1109,7 @@ func (k *KubernetesUtils) ValidateRemoteCluster(remoteCluster *KubernetesUtils, } } -func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, pods []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) { +func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, podsPerNamespace []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) { for key, ns := range namespaces { if createNamespaces { if ns.Labels == nil { @@ -1146,8 +1117,7 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, pod } // convenience label for testing ns.Labels["ns"] = ns.Name - _, err := k.CreateOrUpdateNamespace(ns.Name, ns.Labels) - if err != nil { + if _, err := 
k.CreateOrUpdateNamespace(ns.Name, ns.Labels); err != nil { return nil, fmt.Errorf("unable to create/update ns %s: %w", ns, err) } } @@ -1159,7 +1129,7 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, pod if hostNetworks != nil { hostNetwork = hostNetworks[key] } - for _, pod := range pods { + for _, pod := range podsPerNamespace { log.Infof("Creating/updating Pod '%s/%s'", ns, pod) deployment := ns.Name + pod _, err := k.CreateOrUpdateDeployment(ns.Name, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork) @@ -1169,8 +1139,8 @@ func (k *KubernetesUtils) Bootstrap(namespaces map[string]TestNamespaceMeta, pod } } var allPods []Pod - podIPs := make(map[string][]string, len(pods)*len(namespaces)) - for _, podName := range pods { + podIPs := make(map[string][]string, len(podsPerNamespace)*len(namespaces)) + for _, podName := range podsPerNamespace { for _, ns := range namespaces { allPods = append(allPods, NewPod(ns.Name, podName)) } diff --git a/test/e2e/nodenetworkpolicy_test.go b/test/e2e/nodenetworkpolicy_test.go index 592556d962c..5564fd37329 100644 --- a/test/e2e/nodenetworkpolicy_test.go +++ b/test/e2e/nodenetworkpolicy_test.go @@ -36,8 +36,14 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo p8081 = 8081 p8082 = 8082 p8085 = 8085 - pods = []string{"a"} - namespaces = initNamespaceMeta(formFactorNormal) + podsPerNamespace = []string{"a"} + suffix := randName("") + namespaces = make(map[string]TestNamespaceMeta) + for _, ns := range []string{"x", "y", "z"} { + namespaces[ns] = TestNamespaceMeta{ + Name: ns + "-" + suffix, + } + } nodes = make(map[string]string) nodes["x"] = controlPlaneNodeName() nodes["y"] = workerNodeName(1) @@ -52,7 +58,7 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo } allPods = []Pod{} - for _, podName := range pods { + for _, podName := range podsPerNamespace { for _, ns := range namespaces { allPods = append(allPods, NewPod(ns.Name, podName)) } @@ -62,7 +68,7 @@ func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetwo // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nodes, hostNetworks) + ips, err := k8sUtils.Bootstrap(namespaces, podsPerNamespace, true, nodes, hostNetworks) failOnError(err, t) podIPs = ips }
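
[Editor's note on the controller-side change in this patch (getLabelValues and the added sort.Strings call): Namespaces are bucketed by the joined values of the configured sameLabels keys, a Namespace missing any of the keys is excluded from selection, and the resulting Namespace lists are sorted so the derived AppliedToGroups are deterministic across reconciliations. The sketch below is a simplified reconstruction under stated assumptions, not the upstream code: the value of labelValueSeparator and the map shapes are assumptions based on the diff, and getLabelValues is restructured without the else branch, with behavior matching the patched function.]

package main

import (
	"fmt"
	"sort"
)

const labelValueSeparator = ","

// getLabelValues joins the values of labelKeys into a single grouping key.
// As in the patched controller logic, a Namespace that lacks any of the
// keys yields "" and is treated as not selectable by sameLabels.
func getLabelValues(labels map[string]string, labelKeys []string) string {
	key := ""
	for _, k := range labelKeys {
		v, ok := labels[k]
		if !ok {
			return ""
		}
		key += v + labelValueSeparator
	}
	return key
}

// groupNamespacesByLabelValue buckets Namespace names by their label-value key.
func groupNamespacesByLabelValue(nsLabels map[string]map[string]string, labelKeys []string) map[string][]string {
	groups := make(map[string][]string)
	for ns, lbls := range nsLabels {
		if k := getLabelValues(lbls, labelKeys); k != "" {
			groups[k] = append(groups[k], ns)
		}
	}
	return groups
}

func main() {
	nsLabels := map[string]map[string]string{
		"prod1":   {"purpose": "test", "tier": "prod"},
		"prod2":   {"purpose": "test", "tier": "prod"},
		"dev1":    {"purpose": "test", "tier": "dev"},
		"no-tier": {"purpose": "test-exclusion"}, // missing "tier": excluded
	}
	groups := groupNamespacesByLabelValue(nsLabels, []string{"tier"})
	for key, members := range groups {
		// Sorting, as the patch adds with sort.Strings, keeps the derived
		// group ordering stable across randomized map iterations.
		sort.Strings(members)
		fmt.Println(key, members)
	}
}

[The sort matters because Go map iteration order is randomized: without it, semantically identical policies could emit differently ordered AppliedToGroups on every sync, which appears to be the ordering issue the removed diffNetworkPolicyRuleList test helper worked around before assert.ElementsMatch replaced it.]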