diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 600a98c2a..3e30a3ebe 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,3 +6,33 @@ updates: interval: "weekly" commit-message: prefix: "chore" + + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "chore" + labels: + - "dependencies" + open-pull-requests-limit: 10 + ignore: + - dependency-name: "*" + update-types: ["version-update:semver-major"] + groups: + kubernetes: + patterns: + - "k8s.io/*" + - "sigs.k8s.io/*" + azure-sdk: + patterns: + - "github.com/Azure/*" + - "github.com/AzureAD/*" + all-others: + patterns: + - "*" + exclude-patterns: + - "k8s.io/*" + - "sigs.k8s.io/*" + - "github.com/Azure/*" + - "github.com/AzureAD/*" diff --git a/.github/workflows/chart.yml b/.github/workflows/chart.yml index 17b71aca6..e01ef19ec 100644 --- a/.github/workflows/chart.yml +++ b/.github/workflows/chart.yml @@ -57,18 +57,19 @@ jobs: run: | set -euo pipefail - RELEASE_VERSION="${{ needs.export-registry.outputs.version }}" - + RELEASE_TAG="${{ needs.export-registry.outputs.tag }}" + CHART_VERSION="${{ needs.export-registry.outputs.version }}" OCI_REGISTRY="${{ needs.export-registry.outputs.registry }}/charts" - make helm-push REGISTRY="${OCI_REGISTRY}" TAG="${RELEASE_VERSION}" + + make helm-push REGISTRY="${OCI_REGISTRY}" TAG="${RELEASE_TAG}" CHART_VERSION="${CHART_VERSION}" - name: Verify chart appVersion matches release tag run: | set -euo pipefail - RELEASE_VERSION="${{ needs.export-registry.outputs.version }}" - CHART_VERSION="${RELEASE_VERSION}" - EXPECTED_APP_VERSION="${RELEASE_VERSION}" + RELEASE_TAG="${{ needs.export-registry.outputs.tag }}" + CHART_VERSION="${{ needs.export-registry.outputs.version }}" + EXPECTED_APP_VERSION="${RELEASE_TAG}" rm -rf .helm-verify mkdir -p .helm-verify diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d40015701..d045d76c0 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -184,7 +184,7 @@ jobs: - name: Upload logs if: always() - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: e2e-logs-${{ matrix.customized-settings }} path: test/e2e/logs-${{ matrix.customized-settings }}/ diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 7ebd6f413..ee9d7ae79 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 + uses: step-security/harden-runner@58077d3c7e43986b6b15fba718e8ea69e387dfcc # v2.15.1 with: egress-policy: audit diff --git a/Makefile b/Makefile index 7df29740f..65adfb0a1 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ STATICCHECK_VER := v0.6.1 STATICCHECK_BIN := staticcheck STATICCHECK := $(abspath $(TOOLS_BIN_DIR)/$(STATICCHECK_BIN)-$(STATICCHECK_VER)) -GOIMPORTS_VER := latest +GOIMPORTS_VER := v0.42.0 GOIMPORTS_BIN := goimports GOIMPORTS := $(abspath $(TOOLS_BIN_DIR)/$(GOIMPORTS_BIN)-$(GOIMPORTS_VER)) @@ -290,10 +290,10 @@ push: ## Build and push all Docker images .PHONY: helm-push helm-push: ## Package and push Helm charts to OCI registry - helm package charts/hub-agent --version $(TAG) --app-version $(TAG) --destination .helm-packages - helm package charts/member-agent --version $(TAG) --app-version $(TAG) --destination .helm-packages - helm push .helm-packages/hub-agent-$(TAG).tgz oci://$(REGISTRY) - helm push .helm-packages/member-agent-$(TAG).tgz oci://$(REGISTRY) + helm package charts/hub-agent --version $(CHART_VERSION) --app-version $(TAG) --destination .helm-packages + helm package charts/member-agent --version $(CHART_VERSION) --app-version $(TAG) --destination .helm-packages + helm push .helm-packages/hub-agent-$(CHART_VERSION).tgz oci://$(REGISTRY) + helm push 
.helm-packages/member-agent-$(CHART_VERSION).tgz oci://$(REGISTRY) rm -rf .helm-packages # By default, docker buildx create will pull image moby/buildkit:buildx-stable-1 and hit the too many requests error diff --git a/charts/hub-agent/Chart.yaml b/charts/hub-agent/Chart.yaml index c5f4e7768..efa8648a8 100644 --- a/charts/hub-agent/Chart.yaml +++ b/charts/hub-agent/Chart.yaml @@ -21,4 +21,4 @@ version: 0.1.0 # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.1.0" +appVersion: "v0.1.0" diff --git a/charts/hub-agent/README.md b/charts/hub-agent/README.md index 84dd27c20..12663dffb 100644 --- a/charts/hub-agent/README.md +++ b/charts/hub-agent/README.md @@ -103,7 +103,6 @@ _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documen | `webhookClientConnectionType` | Connection type for webhook client (service or url) | `service` | | `useCertManager` | Use cert-manager for webhook certificate management (requires `enableWorkload=true`) | `false` | | `webhookCertSecretName` | Name of the Secret where cert-manager stores the certificate (required when enabled) | `unset` | -| `enableV1Beta1APIs` | Watch for v1beta1 APIs | `true` | | `enableClusterInventoryAPI` | Enable cluster inventory APIs | `true` | | `enableStagedUpdateRunAPIs` | Enable staged update run APIs | `true` | | `enableEvictionAPIs` | Enable eviction APIs | `true` | diff --git a/charts/hub-agent/templates/crds/crps.yaml b/charts/hub-agent/templates/crds/crps.yaml index 8e92ec2c5..5701e114a 100644 --- a/charts/hub-agent/templates/crds/crps.yaml +++ b/charts/hub-agent/templates/crds/crps.yaml @@ -1,4 +1,2 @@ {{- $files := .Files }} -{{- if .Values.enableV1Beta1APIs }} {{ $files.Get "crdbases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml" }} -{{- end }} diff --git 
a/charts/hub-agent/templates/crds/internalmemberclusters.yaml b/charts/hub-agent/templates/crds/internalmemberclusters.yaml index f036fa06f..321ae5430 100644 --- a/charts/hub-agent/templates/crds/internalmemberclusters.yaml +++ b/charts/hub-agent/templates/crds/internalmemberclusters.yaml @@ -1,4 +1,2 @@ {{- $files := .Files }} -{{- if .Values.enableV1Beta1APIs }} {{ $files.Get "crdbases/cluster.kubernetes-fleet.io_internalmemberclusters.yaml" }} -{{- end }} diff --git a/charts/hub-agent/templates/crds/memberclusters.yaml b/charts/hub-agent/templates/crds/memberclusters.yaml index a77392097..79e4692ff 100644 --- a/charts/hub-agent/templates/crds/memberclusters.yaml +++ b/charts/hub-agent/templates/crds/memberclusters.yaml @@ -1,4 +1,2 @@ {{- $files := .Files }} -{{- if .Values.enableV1Beta1APIs }} {{ $files.Get "crdbases/cluster.kubernetes-fleet.io_memberclusters.yaml" }} -{{- end }} diff --git a/charts/hub-agent/templates/crds/works.yaml b/charts/hub-agent/templates/crds/works.yaml index ad3fa6924..5ed5889f1 100644 --- a/charts/hub-agent/templates/crds/works.yaml +++ b/charts/hub-agent/templates/crds/works.yaml @@ -1,4 +1,2 @@ {{- $files := .Files }} -{{- if .Values.enableV1Beta1APIs }} {{ $files.Get "crdbases/placement.kubernetes-fleet.io_works.yaml" }} -{{- end }} diff --git a/charts/hub-agent/templates/deployment.yaml b/charts/hub-agent/templates/deployment.yaml index 276462f67..23fd4197e 100644 --- a/charts/hub-agent/templates/deployment.yaml +++ b/charts/hub-agent/templates/deployment.yaml @@ -20,7 +20,7 @@ spec: spec: serviceAccountName: {{ include "hub-agent.fullname" . 
}}-sa initContainers: - {{- if and .Values.crdInstaller.enabled .Values.enableV1Beta1APIs }} + {{- if .Values.crdInstaller.enabled }} - name: crd-installer image: "{{ .Values.crdInstaller.image.repository }}:{{ .Values.crdInstaller.image.tag }}" imagePullPolicy: {{ .Values.crdInstaller.image.pullPolicy }} @@ -43,7 +43,6 @@ spec: - --webhook-client-connection-type={{.Values.webhookClientConnectionType}} - --v={{ .Values.logVerbosity }} - -add_dir_header - - --enable-v1beta1-apis={{ .Values.enableV1Beta1APIs }} - --enable-cluster-inventory-apis={{ .Values.enableClusterInventoryAPI }} - --enable-staged-update-run-apis={{ .Values.enableStagedUpdateRunAPIs }} - --enable-eviction-apis={{ .Values.enableEvictionAPIs}} diff --git a/charts/hub-agent/values.yaml b/charts/hub-agent/values.yaml index 08da6311e..90c1106a6 100644 --- a/charts/hub-agent/values.yaml +++ b/charts/hub-agent/values.yaml @@ -53,7 +53,6 @@ tolerations: [] affinity: {} -enableV1Beta1APIs: true enableClusterInventoryAPI: true enableStagedUpdateRunAPIs: true enableEvictionAPIs: true diff --git a/charts/member-agent/templates/deployment.yaml b/charts/member-agent/templates/deployment.yaml index 91a7bc6a5..6aad28fdd 100644 --- a/charts/member-agent/templates/deployment.yaml +++ b/charts/member-agent/templates/deployment.yaml @@ -17,7 +17,7 @@ spec: restartPolicy: Always serviceAccountName: {{ include "member-agent.fullname" . 
}}-sa initContainers: - {{- if and .Values.crdInstaller.enabled .Values.enableV1Beta1APIs }} + {{- if .Values.crdInstaller.enabled }} - name: crd-installer image: "{{ .Values.crdInstaller.image.repository }}:{{ .Values.crdInstaller.image.tag }}" imagePullPolicy: {{ .Values.crdInstaller.image.pullPolicy }} @@ -41,7 +41,6 @@ spec: {{- end }} - --v={{ .Values.logVerbosity }} - -add_dir_header - - --enable-v1beta1-apis={{ .Values.enableV1Beta1APIs }} - --enable-pprof={{ .Values.enablePprof }} - --pprof-port={{ .Values.pprofPort }} - --hub-pprof-port={{ .Values.hubPprofPort }} diff --git a/charts/member-agent/values.yaml b/charts/member-agent/values.yaml index 6f4ec1176..eab5ca2f2 100644 --- a/charts/member-agent/values.yaml +++ b/charts/member-agent/values.yaml @@ -68,8 +68,6 @@ azure: tlsClientInsecure: true #TODO should be false in the production useCAAuth: false -enableV1Beta1APIs: true - enablePprof: true pprofPort: 6065 hubPprofPort: 6066 diff --git a/cmd/hubagent/options/options_test.go b/cmd/hubagent/options/options_test.go index c0be50f81..c45efeb20 100644 --- a/cmd/hubagent/options/options_test.go +++ b/cmd/hubagent/options/options_test.go @@ -707,6 +707,123 @@ func TestPlacementManagementOptions(t *testing.T) { } } +// TestWebhookOptions tests the parsing and validation logic of the webhook options defined in WebhookOptions. 
+func TestWebhookOptions(t *testing.T) { + testCases := []struct { + name string + flagSetName string + args []string + wantWebhookOpts WebhookOptions + wantErred bool + wantErrMsgSubStr string + }{ + { + name: "all default", + flagSetName: "allDefault", + args: []string{}, + wantWebhookOpts: WebhookOptions{ + EnableWebhooks: true, + ClientConnectionType: "url", + ServiceName: "fleetwebhook", + EnableGuardRail: false, + GuardRailWhitelistedUsers: "", + GuardRailDenyModifyMemberClusterLabels: false, + EnableWorkload: false, + UseCertManager: false, + }, + }, + { + name: "all specified", + flagSetName: "allSpecified", + args: []string{ + "--enable-webhook=false", + "--webhook-client-connection-type=service", + "--webhook-service-name=customwebhook", + "--enable-guard-rail=true", + "--whitelisted-users=user1,user2", + "--deny-modify-member-cluster-labels=true", + "--enable-workload=true", + "--use-cert-manager=true", + }, + wantWebhookOpts: WebhookOptions{ + EnableWebhooks: false, + ClientConnectionType: "service", + ServiceName: "customwebhook", + EnableGuardRail: true, + GuardRailWhitelistedUsers: "user1,user2", + GuardRailDenyModifyMemberClusterLabels: true, + EnableWorkload: true, + UseCertManager: true, + }, + }, + { + name: "webhook client connection type URL (case-insensitive)", + flagSetName: "webhookClientConnTypeURL", + args: []string{"--webhook-client-connection-type=URL"}, + wantWebhookOpts: WebhookOptions{ + EnableWebhooks: true, + ClientConnectionType: "url", + ServiceName: "fleetwebhook", + EnableGuardRail: false, + GuardRailWhitelistedUsers: "", + GuardRailDenyModifyMemberClusterLabels: false, + EnableWorkload: false, + UseCertManager: false, + }, + }, + { + name: "webhook client connection type service (case-insensitive)", + flagSetName: "webhookClientConnTypeService", + args: []string{"--webhook-client-connection-type=Service"}, + wantWebhookOpts: WebhookOptions{ + EnableWebhooks: true, + ClientConnectionType: "service", + ServiceName: 
"fleetwebhook", + EnableGuardRail: false, + GuardRailWhitelistedUsers: "", + GuardRailDenyModifyMemberClusterLabels: false, + EnableWorkload: false, + UseCertManager: false, + }, + }, + { + name: "invalid webhook client connection type", + flagSetName: "webhookClientConnTypeInvalid", + args: []string{"--webhook-client-connection-type=ftp"}, + wantErred: true, + wantErrMsgSubStr: "invalid webhook client connection type", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + flags := flag.NewFlagSet(tc.flagSetName, flag.ContinueOnError) + webhookOpts := WebhookOptions{} + webhookOpts.AddFlags(flags) + + err := flags.Parse(tc.args) + if tc.wantErred { + if err == nil { + t.Fatalf("flag Parse() = nil, want erred") + } + + if !strings.Contains(err.Error(), tc.wantErrMsgSubStr) { + t.Fatalf("flag Parse() error = %v, want error msg with sub-string %s", err, tc.wantErrMsgSubStr) + } + return + } + + if err != nil { + t.Fatalf("flag Parse() = %v, want nil", err) + } + + if diff := cmp.Diff(webhookOpts, tc.wantWebhookOpts); diff != "" { + t.Errorf("webhook options diff (-got, +want):\n%s", diff) + } + }) + } +} + // TestRateLimitOptions tests the parsing and validation logic of the rate limit options defined in RateLimitOptions. func TestRateLimitOptions(t *testing.T) { testCases := []struct { diff --git a/cmd/hubagent/options/webhooks.go b/cmd/hubagent/options/webhooks.go index efa23aa6c..f34bd7423 100644 --- a/cmd/hubagent/options/webhooks.go +++ b/cmd/hubagent/options/webhooks.go @@ -71,22 +71,10 @@ func (o *WebhookOptions) AddFlags(flags *flag.FlagSet) { "Enable the KubeFleet webhooks or not.", ) - flags.Func( + flags.Var( + newWebhookClientConnTypeValueWithValidation(string(URL), &o.ClientConnectionType), "webhook-client-connection-type", "The connection type used by the webhook client. Valid values are `url` and `service`. Defaults to `url`. 
This option only applies if webhooks are enabled.", - func(s string) error { - if len(s) == 0 { - o.ClientConnectionType = "url" - return nil - } - - parsedStr, err := parseWebhookClientConnectionString(s) - if err != nil { - return fmt.Errorf("invalid webhook client connection type: %w", err) - } - o.ClientConnectionType = string(parsedStr) - return nil - }, ) flags.StringVar( @@ -131,3 +119,28 @@ func (o *WebhookOptions) AddFlags(flags *flag.FlagSet) { "Use the cert-manager project for managing KubeFleet webhook server certificates or not. If set to false, the system will use self-signed certificates. If set to true, the EnableWorkload option must be set to true as well. This option only applies if webhooks are enabled.", ) } + +type WebhookClientConnTypeValueWithValidation string + +func (v *WebhookClientConnTypeValueWithValidation) String() string { + return string(*v) +} + +func (v *WebhookClientConnTypeValueWithValidation) Set(s string) error { + if len(s) == 0 { + *v = "url" + return nil + } + + parsedStr, err := parseWebhookClientConnectionString(s) + if err != nil { + return fmt.Errorf("invalid webhook client connection type: %w", err) + } + *v = WebhookClientConnTypeValueWithValidation(parsedStr) + return nil +} + +func newWebhookClientConnTypeValueWithValidation(defaultVal string, p *string) *WebhookClientConnTypeValueWithValidation { + *p = defaultVal + return (*WebhookClientConnTypeValueWithValidation)(p) +} diff --git a/hack/Azure/property-based-scheduling.md b/hack/Azure/property-based-scheduling.md index c8f271362..bedb04d42 100644 --- a/hack/Azure/property-based-scheduling.md +++ b/hack/Azure/property-based-scheduling.md @@ -123,7 +123,6 @@ helm install hub-agent charts/hub-agent/ \ --set namespace=fleet-system \ --set enableWebhook=true \ - --set webhookClientConnectionType=service \ - --set enableV1Beta1APIs=true + --set webhookClientConnectionType=service ``` It will take a few moments to complete the installation. 
After the command returns, verify that the Fleet hub agent is up and running with this command: @@ -185,7 +184,6 @@ do --set config.memberClusterName="${MEMBER_CLUSTERS[$i]}" \ --set logVerbosity=5 \ --set namespace=fleet-system \ - --set enableV1Beta1APIs=true \ --set propertyProvider=$PROPERTY_PROVIDER done ``` diff --git a/hack/Azure/setup/createHubCluster.sh b/hack/Azure/setup/createHubCluster.sh index 388bd806c..9d37a1bc8 100755 --- a/hack/Azure/setup/createHubCluster.sh +++ b/hack/Azure/setup/createHubCluster.sh @@ -30,7 +30,6 @@ helm install hub-agent charts/hub-agent/ \ --set namespace=fleet-system \ --set enableWebhook=false \ --set webhookClientConnectionType=service \ - --set enableV1Beta1APIs=true \ --set clusterUnhealthyThreshold="3m0s" \ --set forceDeleteWaitTime="1m0s" \ --set resources.limits.cpu=4 \ diff --git a/hack/Azure/setup/joinMC.sh b/hack/Azure/setup/joinMC.sh index 899c54819..f45b83559 100755 --- a/hack/Azure/setup/joinMC.sh +++ b/hack/Azure/setup/joinMC.sh @@ -88,8 +88,7 @@ helm install member-agent charts/member-agent/ \ --set refreshtoken.pullPolicy=Always \ --set config.memberClusterName=$MEMBER_CLUSTER \ --set logVerbosity=5 \ - --set namespace=fleet-system \ - --set enableV1Beta1APIs=true + --set namespace=fleet-system kubectl get pods -A kubectl config use-context $HUB_CLUSTER_CONTEXT diff --git a/hack/membership/join.sh b/hack/membership/join.sh index 695950bb9..f84765d51 100755 --- a/hack/membership/join.sh +++ b/hack/membership/join.sh @@ -126,5 +126,4 @@ helm install member-agent charts/member-agent/ \ --set refreshtoken.pullPolicy=Never \ --set config.memberClusterName="$MEMBER_CLUSTER" \ --set logVerbosity=6 \ - --set namespace=fleet-system \ - --set enableV1Beta1APIs=true + --set namespace=fleet-system diff --git a/hack/membership/joinMC.sh b/hack/membership/joinMC.sh index 7a4ac4224..38c524fda 100755 --- a/hack/membership/joinMC.sh +++ b/hack/membership/joinMC.sh @@ -122,8 +122,7 @@ helm upgrade --install member-agent 
charts/member-agent/ \ --set refreshtoken.pullPolicy=Always \ --set config.memberClusterName=$MEMBER_CLUSTER \ --set logVerbosity=5 \ - --set namespace=fleet-system \ - --set enableV1Beta1APIs=true + --set namespace=fleet-system kubectl get pods -A kubectl config use-context $HUB_CLUSTER_CONTEXT diff --git a/pkg/utils/controller/resource_selector_resolver.go b/pkg/utils/controller/resource_selector_resolver.go index 1dcfa0d13..a405917c1 100644 --- a/pkg/utils/controller/resource_selector_resolver.go +++ b/pkg/utils/controller/resource_selector_resolver.go @@ -248,22 +248,18 @@ func (rs *ResourceSelectorResolver) gatherSelectedResource(placementKey types.Na Kind: selector.Kind, } if gvk == utils.NamespaceGVK && placementKey.Namespace == "" && selector.SelectionScope == placementv1beta1.NamespaceWithResourceSelectors { - namespaces, err := rs.fetchSelectedNamespaces(selector, placementKey.Name) + namespace, found, err := rs.fetchSelectedNamespace(selector, placementKey.Name) if err != nil { return nil, err } // NamespaceWithResourceSelectors mode requires exactly one namespace - if len(namespaces) == 0 { + if !found { err := fmt.Errorf("invalid clusterResourcePlacement %s: NamespaceWithResourceSelectors mode requires exactly one namespace, but no namespaces were selected", placementKey.Name) klog.ErrorS(err, "No namespaces selected with NamespaceWithResourceSelectors mode", "placement", placementKey.Name) return nil, NewUserError(err) } - if len(namespaces) > 1 { - err := fmt.Errorf("invalid clusterResourcePlacement %s: NamespaceWithResourceSelectors mode requires exactly one namespace, but %d namespaces were selected", placementKey.Name, len(namespaces)) - klog.ErrorS(err, "Multiple namespaces selected with NamespaceWithResourceSelectors mode", "placement", placementKey.Name, "namespaceCount", len(namespaces)) - return nil, NewUserError(err) - } - selectedNamespace = namespaces[0] + // Note: CEL validation ensures that at most one namespace can be selected. 
+ selectedNamespace = namespace break } } @@ -591,32 +587,37 @@ func (rs *ResourceSelectorResolver) fetchResources(selector placementv1beta1.Res return selectedObjs, nil } -// fetchSelectedNamespaces retrieves the namespace names based on the namespace selector. -func (rs *ResourceSelectorResolver) fetchSelectedNamespaces(selector placementv1beta1.ResourceSelectorTerm, placementName string) ([]string, error) { - klog.V(2).InfoS("start to fetch selected namespaces", "selector", selector, "placement", placementName) +// fetchSelectedNamespace retrieves the namespace name based on the namespace selector. +// This function assumes the selector is a name-based selector (not label-based) as guaranteed by CEL validation. +// Returns the namespace name if found and not skipped, or empty string if not found/skipped. +func (rs *ResourceSelectorResolver) fetchSelectedNamespace(selector placementv1beta1.ResourceSelectorTerm, placementName string) (string, bool, error) { + klog.V(2).InfoS("start to fetch selected namespace", "selector", selector, "placement", placementName) // Use fetchResources to get namespace objects (handles label selector, name selector, deletion timestamps) placementKey := types.NamespacedName{Name: placementName} objs, err := rs.fetchResources(selector, placementKey) if err != nil { - return nil, err + return "", false, err } - // Filter by skipped namespaces and extract names - namespaceNames := make([]string, 0, len(objs)) - for _, obj := range objs { - ns, err := meta.Accessor(obj) - if err != nil { - return nil, NewUnexpectedBehaviorError(fmt.Errorf("failed to access the namespace object: %w", err)) - } - if !utils.ShouldPropagateNamespace(ns.GetName(), rs.SkippedNamespaces) { - klog.V(2).InfoS("skip namespace that is not allowed to propagate", "namespace", ns.GetName(), "placement", placementName) - continue - } - namespaceNames = append(namespaceNames, ns.GetName()) + // Since CEL validation ensures name-based selection only, we expect at most 1 
namespace object + if len(objs) == 0 { + return "", false, nil + } + + // We should have exactly 1 namespace object due to CEL validation constraints + ns, err := meta.Accessor(objs[0]) + if err != nil { + return "", false, NewUnexpectedBehaviorError(fmt.Errorf("failed to access the namespace object: %w", err)) + } + + // Check if this namespace should be propagated + if !utils.ShouldPropagateNamespace(ns.GetName(), rs.SkippedNamespaces) { + klog.V(2).InfoS("skip namespace that is not allowed to propagate", "namespace", ns.GetName(), "placement", placementName) + return "", false, nil } - return namespaceNames, nil + return ns.GetName(), true, nil } func (rs *ResourceSelectorResolver) ShouldPropagateObj(namespace, placementName string, obj runtime.Object) (bool, error) { diff --git a/pkg/utils/controller/resource_selector_resolver_test.go b/pkg/utils/controller/resource_selector_resolver_test.go index 2abeca5d5..6c197f6ec 100644 --- a/pkg/utils/controller/resource_selector_resolver_test.go +++ b/pkg/utils/controller/resource_selector_resolver_test.go @@ -1493,56 +1493,6 @@ func TestGatherSelectedResource(t *testing.T) { // Should select only the namespace, not its child resources want: []*unstructured.Unstructured{testNamespace}, }, - { - name: "should error when selecting multiple namespaces with NamespaceWithResourceSelectors mode using label selector", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "environment": "test", - }, - }, - SelectionScope: fleetv1beta1.NamespaceWithResourceSelectors, - }, - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - // Create a second test namespace with the same 
label - testNamespace2 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "test-ns-2", - "labels": map[string]interface{}{ - "environment": "test", - }, - }, - }, - } - testNamespace2.SetGroupVersionKind(utils.NamespaceGVK) - - return &testinformer.FakeManager{ - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, testNamespace2}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, - } - }(), - // Should error because multiple namespaces match the label selector - wantError: ErrUserError, - }, { name: "should error when selecting zero namespaces with NamespaceWithResourceSelectors mode", placementName: types.NamespacedName{Name: "test-placement"}, @@ -2201,7 +2151,7 @@ func TestSortResources(t *testing.T) { } } -func TestFetchSelectedNamespaces(t *testing.T) { +func TestFetchSelectedNamespace(t *testing.T) { testNs1 := &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "v1", @@ -2246,7 +2196,8 @@ func TestFetchSelectedNamespaces(t *testing.T) { selector fleetv1beta1.ResourceSelectorTerm skippedNamespaces map[string]bool informerManager *testinformer.FakeManager - want []string + want string + wantFound bool wantErr bool }{ { @@ -2263,25 +2214,8 @@ func TestFetchSelectedNamespaces(t *testing.T) { utils.NamespaceGVR: {Objects: []runtime.Object{testNs1, testNs2}}, }, }, - want: []string{"test-ns-1"}, - }, - { - name: "select namespaces by label selector", - selector: fleetv1beta1.ResourceSelectorTerm{ - Group: "", - Version: "v1", - Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"env": "prod"}, - }, - }, - skippedNamespaces: nil, - informerManager: &testinformer.FakeManager{ - Listers: 
map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNs1, testNs2}}, - }, - }, - want: []string{"test-ns-1"}, + want: "test-ns-1", + wantFound: true, }, { name: "filter out skipped namespaces", @@ -2289,17 +2223,16 @@ func TestFetchSelectedNamespaces(t *testing.T) { Group: "", Version: "v1", Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{}, - }, + Name: "kube-system", // Use name-based selection instead of label selector }, skippedNamespaces: map[string]bool{"kube-system": true}, informerManager: &testinformer.FakeManager{ Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNs1, skippedNs}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNs1, testNs2, skippedNs}}, }, }, - want: []string{"test-ns-1"}, + want: "", // Empty because kube-system is skipped + wantFound: false, }, { name: "no namespaces match selector", @@ -2315,7 +2248,8 @@ func TestFetchSelectedNamespaces(t *testing.T) { utils.NamespaceGVR: {Objects: []runtime.Object{testNs1, testNs2}}, }, }, - want: []string{}, + want: "", + wantFound: false, }, } @@ -2328,13 +2262,16 @@ func TestFetchSelectedNamespaces(t *testing.T) { RestMapper: newFakeRESTMapper(), } - got, err := rsr.fetchSelectedNamespaces(tt.selector, "test-placement") + gotNamespace, gotFound, err := rsr.fetchSelectedNamespace(tt.selector, "test-placement") if (err != nil) != tt.wantErr { - t.Errorf("fetchSelectedNamespaces() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("fetchSelectedNamespace() error = %v, wantErr %v", err, tt.wantErr) return } - if diff := cmp.Diff(tt.want, got); diff != "" { - t.Errorf("fetchSelectedNamespaces() mismatch (-want +got):\n%s", diff) + if gotNamespace != tt.want { + t.Errorf("fetchSelectedNamespace() namespace = %v, want %v", gotNamespace, tt.want) + } + if gotFound != tt.wantFound { + t.Errorf("fetchSelectedNamespace() found = %v, 
wantFound %v", gotFound, tt.wantFound) } }) } diff --git a/test/e2e/setup.sh b/test/e2e/setup.sh index 59793295c..4637dd96f 100755 --- a/test/e2e/setup.sh +++ b/test/e2e/setup.sh @@ -223,7 +223,6 @@ do --set config.memberClusterName="kind-${MEMBER_CLUSTERS[$i]}" \ --set logVerbosity=5 \ --set namespace=fleet-system \ - --set enableV1Beta1APIs=true \ --set priorityQueue.enabled=true \ --set workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds=5 \ --set workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds=5 \ @@ -246,7 +245,6 @@ do --set config.memberClusterName="kind-${MEMBER_CLUSTERS[$i]}" \ --set logVerbosity=5 \ --set namespace=fleet-system \ - --set enableV1Beta1APIs=true \ --set priorityQueue.enabled=true \ --set workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds=5 \ --set workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds=5 \ diff --git a/test/upgrade/setup.sh b/test/upgrade/setup.sh index ab6e8f073..7f5d31576 100755 --- a/test/upgrade/setup.sh +++ b/test/upgrade/setup.sh @@ -137,8 +137,7 @@ do --set refreshtoken.pullPolicy=Never \ --set config.memberClusterName="kind-${MEMBER_CLUSTERS[$i]}" \ --set logVerbosity=5 \ - --set namespace=fleet-system \ - --set enableV1Beta1APIs=true + --set namespace=fleet-system done echo "Setup for the before upgrade environment has been completed." diff --git a/test/upgrade/upgrade.sh b/test/upgrade/upgrade.sh index c03642b27..831e45255 100755 --- a/test/upgrade/upgrade.sh +++ b/test/upgrade/upgrade.sh @@ -99,7 +99,6 @@ if [ -n "$UPGRADE_MEMBER_SIDE" ]; then --set refreshtoken.pullPolicy=Never \ --set config.memberClusterName="kind-${MEMBER_CLUSTERS[$i]}" \ --set logVerbosity=5 \ - --set namespace=fleet-system \ - --set enableV1Beta1APIs=true + --set namespace=fleet-system done fi