diff --git a/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc.go b/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc.go
index d73b587eb0..233d146c13 100644
--- a/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc.go
+++ b/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc.go
@@ -282,3 +282,36 @@ func deleteResources(ctx context.Context, lp sdk.StageLogPersister, applier *pro
 
 	return deletedCount
 }
+
+// findOrphanedKeys returns the keys of resources present in targetManifests
+// but absent from runningManifests.
+func findOrphanedKeys(runningManifests, targetManifests []provider.Manifest) []provider.ResourceKey {
+	runningKeys := make(map[provider.ResourceKey]struct{}, len(runningManifests))
+	for _, m := range runningManifests {
+		runningKeys[m.Key()] = struct{}{}
+	}
+
+	orphans := make([]provider.ResourceKey, 0)
+	for _, m := range targetManifests {
+		if _, exists := runningKeys[m.Key()]; !exists {
+			orphans = append(orphans, m.Key())
+		}
+	}
+	return orphans
+}
+
+// pruneOrphanedResources deletes resources that exist in targetManifests but not in runningManifests.
+// This handles the case where a new resource was applied during the failed deployment and must be
+// removed during rollback to restore the cluster to the last known good state.
+func pruneOrphanedResources(ctx context.Context, lp sdk.StageLogPersister, applier *provider.Applier, runningManifests, targetManifests []provider.Manifest) {
+	orphans := findOrphanedKeys(runningManifests, targetManifests)
+
+	if len(orphans) == 0 {
+		lp.Info("No orphaned resources to prune")
+		return
+	}
+
+	lp.Infof("Found %d orphaned resource(s) to prune", len(orphans))
+	deleted := deleteResources(ctx, lp, applier, orphans)
+	lp.Successf("Successfully pruned %d orphaned resource(s)", deleted)
+}
diff --git a/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc_test.go b/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc_test.go
index c175608730..40b9d64ff8 100644
--- a/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc_test.go
+++ b/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/misc_test.go
@@ -23,6 +23,159 @@ import (
 	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes_multicluster/provider"
 )
 
+func TestFindOrphanedKeys(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name             string
+		runningManifests string
+		targetManifests  string
+		wantCount        int
+		wantNames        []string
+	}{
+		{
+			name: "no orphans when target equals running",
+			runningManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+`,
+			targetManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+`,
+			wantCount: 0,
+			wantNames: []string{},
+		},
+		{
+			name:             "all target resources are orphaned when running is empty",
+			runningManifests: "",
+			targetManifests: `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: new-config
+  namespace: default
+`,
+			wantCount: 1,
+			wantNames: []string{"new-config"},
+		},
+		{
+			name: "new resource in target not present in running is orphaned",
+			runningManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+`,
+			targetManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: new-config
+  namespace: default
+`,
+			wantCount: 1,
+			wantNames: []string{"new-config"},
+		},
+		{
+			name: "resource only in running is not pruned",
+			runningManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: old-config
+  namespace: default
+`,
+			targetManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+`,
+			wantCount: 0,
+			wantNames: []string{},
+		},
+		{
+			name: "multiple orphaned resources",
+			runningManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+`,
+			targetManifests: `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: app
+  namespace: default
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: new-config
+  namespace: default
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: new-svc
+  namespace: default
+`,
+			wantCount: 2,
+			wantNames: []string{"new-config", "new-svc"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			var running []provider.Manifest
+			if tt.runningManifests != "" {
+				running = mustParseManifests(t, tt.runningManifests)
+			}
+			var target []provider.Manifest
+			if tt.targetManifests != "" {
+				target = mustParseManifests(t, tt.targetManifests)
+			}
+
+			orphans := findOrphanedKeys(running, target)
+
+			assert.Len(t, orphans, tt.wantCount)
+
+			gotNames := make([]string, 0, len(orphans))
+			for _, k := range orphans {
+				gotNames = append(gotNames, k.Name())
+			}
+			for _, name := range tt.wantNames {
+				assert.Contains(t, gotNames, name)
+			}
+		})
+	}
+}
+
 func TestCheckVariantSelectorInWorkload(t *testing.T) {
 	t.Parallel()
 
diff --git a/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/rollback.go b/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/rollback.go
index 9fc8f4c222..c681f97ec2 100644
--- a/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/rollback.go
+++ b/pkg/app/pipedv1/plugin/kubernetes_multicluster/deployment/rollback.go
@@ -210,8 +210,18 @@ func (p *Plugin) rollback(ctx context.Context, input *sdk.ExecuteStageInput[kube
 		failed = true
 	}
 
-	// TODO: prune resources which don't exist in the running manifests but exist in the target manifests.
-	// This occurs when the user adds a new resource and the deployment pipeline fails.
+	lp.Info("Start pruning resources that do not exist in the running manifests")
+	targetCfg, err := input.Request.TargetDeploymentSource.AppConfig()
+	if err != nil {
+		lp.Infof("Failed to load target app config for pruning, skipping: %v", err)
+	} else {
+		targetManifests, err := p.loadManifests(ctx, &input.Request.Deployment, targetCfg.Spec, &input.Request.TargetDeploymentSource, provider.NewLoader(toolRegistry), input.Logger, multiTarget)
+		if err != nil {
+			lp.Infof("Failed to load target manifests for pruning, skipping: %v", err)
+		} else {
+			pruneOrphanedResources(ctx, lp, applier, manifests, targetManifests)
+		}
+	}
 
 	if failed {
 		return sdk.StageStatusFailure