Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions pkg/controller/plan/adapter/base/doc.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,51 @@ const (
// Related to https://github.com/kubevirt/containerized-data-importer/pull/3572
AnnVddkExtraArgs = "cdi.kubevirt.io/storage.pod.vddk.extraargs"

// CDI import backing file annotation on PVC
AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile"

// Source URL, on PVC
AnnEndpoint = "cdi.kubevirt.io/storage.import.endpoint"

// Secret name for source credentials, on PVC
AnnSecret = "cdi.kubevirt.io/storage.import.secretName"

// VM UUID, on PVC
AnnUUID = "cdi.kubevirt.io/storage.import.uuid"

// VDDK-specific thumbprint
AnnThumbprint = "cdi.kubevirt.io/storage.import.vddk.thumbprint"

// VDDK image, on PVC
AnnVddkInitImageURL = "cdi.kubevirt.io/storage.pod.vddk.initimageurl"

// Importer pod progress phase, on PVC
AnnPodPhase = "cdi.kubevirt.io/storage.pod.phase"

// True if the current checkpoint is the one taken for the cutover, on PVC
AnnFinalCheckpoint = "cdi.kubevirt.io/storage.checkpoint.final"

// Current checkpoint reference, on PVC
AnnCurrentCheckpoint = "cdi.kubevirt.io/storage.checkpoint.current"

// Previous checkpoint reference, on PVC
AnnPreviousCheckpoint = "cdi.kubevirt.io/storage.checkpoint.previous"

// Not a whole annotation but a prefix, append a snapshot name to mark that the snapshot was already copied (on PVC)
AnnCheckpointsCopied = "cdi.kubevirt.io/storage.checkpoint.copied"

// Allow DataVolume to adopt a PVC, on DataVolume
AnnAllowClaimAdoption = "cdi.kubevirt.io/allowClaimAdoption"

// Inform CDI that the DataVolume is already filled up, on DataVolume
AnnPrePopulated = "cdi.kubevirt.io/storage.prePopulated"

// Tell CDI which importer to use, on PVC
AnnSource = "cdi.kubevirt.io/storage.import.source"

// Name of the current importer pod, on PVC
AnnImportPod = "cdi.kubevirt.io/storage.import.importPod"

Comment thread
mrnold marked this conversation as resolved.
// In a UDN namespace we can't directly reach the virt-v2v pod unless we specify default opened ports on the pod network.
AnnOpenDefaultPorts = "k8s.ovn.org/open-default-ports"

Expand Down
158 changes: 123 additions & 35 deletions pkg/controller/plan/adapter/vsphere/builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,12 +91,6 @@ const (
WindowsPrefix = "win"
)

// Annotations
const (
// CDI import backing file annotation on PVC
AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile"
)

const (
	// Shareable is the sentinel value used to flag a disk as shareable.
	// NOTE(review): exact consumer not visible in this chunk — presumably
	// matched against a disk/volume mode elsewhere; confirm against callers.
	Shareable = "shareable"
)
Expand Down Expand Up @@ -524,6 +518,37 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
return
}

// For storage offload warm migrations, match this DataVolume to the
// existing PVC via the backing file name.
var pvcMap map[string]core.PersistentVolumeClaim
if r.Plan.Spec.Warm && r.SupportsVolumePopulators(vmRef) {
pvcMap = make(map[string]core.PersistentVolumeClaim)
pvcs := &core.PersistentVolumeClaimList{}
pvcLabels := map[string]string{
"vmID": vmRef.ID,
"migration": string(r.Migration.UID),
}

err = r.Context.Destination.Client.List(
context.TODO(),
pvcs,
&client.ListOptions{
Namespace: r.Plan.Spec.TargetNamespace,
LabelSelector: labels.SelectorFromSet(pvcLabels),
},
)
if err != nil {
err = liberr.Wrap(err)
return
}

for _, pvc := range pvcs.Items {
if copyOffload, present := pvc.Annotations["copy-offload"]; present && copyOffload != "" {
pvcMap[baseVolume(copyOffload, r.Plan.Spec.Warm)] = pvc
}
}
}
Comment thread
mrnold marked this conversation as resolved.

// Sort disks by bus, so we can match the disk index to the boot order.
// Important: need to match order in mapDisks method
disks := r.sortedDisksAsVmware(vm.Disks)
Expand Down Expand Up @@ -597,9 +622,17 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
// matching the disk and PVC index.
dv.ObjectMeta.Annotations[planbase.AnnDiskIndex] = fmt.Sprintf("%d", diskIndex)

// Set PVC name/generateName using template if configured
if err := r.setPVCNameFromTemplate(&dv.ObjectMeta, vm, diskIndex, disk); err != nil {
r.Log.Info("Failed to set PVC name from template", "error", err)
if pvcMap != nil && dvSource.VDDK != nil {
// In a warm migration with storage offload, the PVC has already been created with
// the name template. Copy the result to the DataVolume so it can adopt the PVC.
if pvc, present := pvcMap[dvSource.VDDK.BackingFile]; present {
dv.ObjectMeta.Name = pvc.Name
}
} else {
// Set PVC name/generateName using template if configured
if err := r.setPVCNameFromTemplate(&dv.ObjectMeta, vm, diskIndex, disk); err != nil {
r.Log.Info("Failed to set PVC name from template", "error", err)
}
}

if !useV2vForTransfer && vddkConfigMap != nil {
Expand Down Expand Up @@ -937,7 +970,7 @@ func (r *Builder) mapDisks(vm *model.VM, vmRef ref.Ref, persistentVolumeClaims [
if source, ok := pvc.Annotations[planbase.AnnDiskSource]; ok {
pvcMap[trimBackingFileName(source)] = pvc
} else {
pvcMap[trimBackingFileName(pvc.Annotations[AnnImportBackingFile])] = pvc
pvcMap[trimBackingFileName(pvc.Annotations[planbase.AnnImportBackingFile])] = pvc
}
}

Expand Down Expand Up @@ -1125,7 +1158,7 @@ func (r *Builder) ResolveDataVolumeIdentifier(dv *cdi.DataVolume) string {

// ResolvePersistentVolumeClaimIdentifier returns a stable identifier for a
// PersistentVolumeClaim. The identifier is derived from the CDI import
// backing-file annotation on the PVC, normalized through baseVolume so that
// warm-migration naming variants resolve to the same value.
func (r *Builder) ResolvePersistentVolumeClaimIdentifier(pvc *core.PersistentVolumeClaim) string {
	// The stale pre-change return (which read the now-removed package-local
	// AnnImportBackingFile constant) has been dropped; only the
	// planbase-qualified constant exists after this refactor.
	return baseVolume(pvc.Annotations[planbase.AnnImportBackingFile], r.Plan.Spec.Warm)
}
Comment thread
mrnold marked this conversation as resolved.

// Load
Expand Down Expand Up @@ -1239,7 +1272,7 @@ func (r *Builder) LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.Persiste
// Check whether the specific VM supports Volume Populators by examining only the datastores used by this VM.
// This prevents mixed configuration issues where some VMs have offload-capable datastores and others don't.
func (r *Builder) SupportsVolumePopulators(vmRef ref.Ref) bool {
if !settings.Settings.Features.CopyOffload || r.Plan.Spec.Warm {
if !settings.Settings.Features.CopyOffload {
return false
}

Expand Down Expand Up @@ -1300,6 +1333,24 @@ func (r *Builder) PopulatorVolumes(vmRef ref.Ref, annotations map[string]string,
return
}

// Get a list of existing PVCs to avoid creating duplicates
pvcLabels := map[string]string{
"migration": string(r.Migration.UID),
"vmID": vmRef.ID,
}
pvcList := &core.PersistentVolumeClaimList{}
err = r.Destination.Client.List(
context.TODO(),
pvcList,
&client.ListOptions{
LabelSelector: labels.SelectorFromSet(pvcLabels),
Namespace: r.Plan.Spec.TargetNamespace,
})
if err != nil {
err = liberr.Wrap(err)
return
}

// Get sorted disks to maintain consistent indexing with other parts of the system
sortedDisks := r.sortedDisksAsVmware(vm.Disks)

Expand Down Expand Up @@ -1371,8 +1422,8 @@ func (r *Builder) PopulatorVolumes(vmRef ref.Ref, annotations map[string]string,
} else {
pvc.Annotations = annotations
}
pvc.Annotations[planbase.AnnDiskSource] = baseVolume(disk.File, false)
pvc.Annotations["copy-offload"] = baseVolume(disk.File, false)
pvc.Annotations[planbase.AnnDiskSource] = baseVolume(disk.File, r.Plan.Spec.Warm)
pvc.Annotations["copy-offload"] = baseVolume(disk.File, r.Plan.Spec.Warm)

Comment thread
mrnold marked this conversation as resolved.
// Apply PVC template naming if configured, replacing the commonName
if err := r.setColdMigrationDefaultPVCName(&pvc.ObjectMeta, vm, diskIndex, disk); err != nil {
Expand All @@ -1390,6 +1441,31 @@ func (r *Builder) PopulatorVolumes(vmRef ref.Ref, annotations map[string]string,
// populator name is the name of the populator, and we can't use generateName for the populator
populatorName := pvc.ObjectMeta.Name

// For warm migration, add annotations to jump-start the DataVolume
v := r.getPlanVMStatus(vm)
if v != nil && v.Warm != nil {
pvc.Annotations[planbase.AnnEndpoint] = r.Source.Provider.Spec.URL
pvc.Annotations[planbase.AnnImportBackingFile] = baseVolume(disk.File, r.Plan.Spec.Warm)
pvc.Annotations[planbase.AnnSecret] = secretName
pvc.Annotations[planbase.AnnUUID] = vm.UUID
pvc.Annotations[planbase.AnnThumbprint] = r.Source.Provider.Status.Fingerprint
pvc.Annotations[planbase.AnnVddkInitImageURL] = settings.GetVDDKImage(r.Source.Provider.Spec.Settings)
pvc.Annotations[planbase.AnnPodPhase] = "Succeeded"
pvc.Annotations[planbase.AnnSource] = "vddk"

n := len(v.Warm.Precopies)
if n > 0 { // Should be 1 at this point
snapshot := v.Warm.Precopies[n-1].Snapshot
pvc.Annotations[planbase.AnnFinalCheckpoint] = "false"
pvc.Annotations[planbase.AnnCurrentCheckpoint] = snapshot
pvc.Annotations[planbase.AnnPreviousCheckpoint] = ""

copied := fmt.Sprintf("%s.%s", planbase.AnnCheckpointsCopied, snapshot)
pvc.Annotations[copied] = "xcopy-initial-offload" // Any value should work here
pvc.Annotations[planbase.AnnImportPod] = "xcopy-initial-offload" // Should match above
}
}

// Update DataSourceRef to point to the volume populator
pvc.Spec.DataSourceRef.Name = populatorName

Expand All @@ -1403,7 +1479,7 @@ func (r *Builder) PopulatorVolumes(vmRef ref.Ref, annotations map[string]string,
},
Spec: api.VSphereXcopyVolumePopulatorSpec{
VmId: vmRef.ID,
VmdkPath: disk.File,
VmdkPath: baseVolume(disk.File, r.Plan.Spec.Warm),
SecretName: secretName,
StorageVendorProduct: string(storageVendorProduct),
},
Expand All @@ -1414,25 +1490,26 @@ func (r *Builder) PopulatorVolumes(vmRef ref.Ref, annotations map[string]string,
if err != nil {
return nil, fmt.Errorf("failed to merge secrets for popoulators %w", err)
}
// TODO should we handle if already exists due to re-entry? if the former
// reconcile was successful in creating the pvc but failed after that, e.g when
// creating the volumepopulator resouce failed
r.Log.Info("Creating pvc", "pvc", pvc)
err = r.Destination.Client.Create(context.TODO(), &pvc, &client.CreateOptions{})
if err != nil {
// ignore if already exists?
return nil, err
}

r.Log.Info("Ensuring a populator service account")
err := r.ensurePopulatorServiceAccount(namespace)
if err != nil {
return nil, err
}
r.Log.Info("Creating the populator resource", "VSphereXcopyVolumePopulator", vp)
err = r.Destination.Client.Create(context.TODO(), &vp, &client.CreateOptions{})
if err != nil {
return nil, err
if !r.isPVCExistsInList(&pvc, pvcList) {
r.Log.Info("Creating pvc", "pvc", pvc)
err = r.Destination.Client.Create(context.TODO(), &pvc, &client.CreateOptions{})
if err != nil {
if k8serr.IsAlreadyExists(err) {
continue
}
return nil, err
}
// Should probably check these separately
r.Log.Info("Ensuring a populator service account")
err := r.ensurePopulatorServiceAccount(namespace)
if err != nil {
return nil, err
}
r.Log.Info("Creating the populator resource", "VSphereXcopyVolumePopulator", vp)
err = r.Destination.Client.Create(context.TODO(), &vp, &client.CreateOptions{})
if err != nil {
return nil, err
}
}

}
Expand Down Expand Up @@ -1504,7 +1581,7 @@ func (r *Builder) SetPopulatorDataSourceLabels(vmRef ref.Ref, pvcs []*core.Persi

// GetPopulatorTaskName returns the name used to track the volume-populator
// task for the given PVC (copy-offload migrations only). The name is taken
// from the PVC's disk-source annotation, normalized through baseVolume so it
// matches the identifier used when the populator was created.
//
// err is always nil; the error return is kept for interface compatibility.
func (r *Builder) GetPopulatorTaskName(pvc *core.PersistentVolumeClaim) (taskName string, err error) {
	// The stale pre-change assignment (raw annotation value, a dead store
	// immediately overwritten) has been removed; only the normalized form
	// is returned.
	taskName = baseVolume(pvc.Annotations[planbase.AnnDiskSource], r.Plan.Spec.Warm)
	return taskName, nil
}

Expand Down Expand Up @@ -1833,8 +1910,10 @@ func (r *Builder) mergeSecrets(migrationSecret, migrationSecretNS, storageVendor
dst.Data["GOVMOMI_HOSTNAME"] = []byte(h.Hostname())
case "user":
dst.Data["GOVMOMI_USERNAME"] = value
dst.Data["accessKeyId"] = value
case "password":
dst.Data["GOVMOMI_PASSWORD"] = value
dst.Data["secretKey"] = value
case "insecureSkipVerify":
dst.Data["GOVMOMI_INSECURE"] = value
}
Expand Down Expand Up @@ -2026,3 +2105,12 @@ func (r *Builder) addSSHKeysToSecret(secret *core.Secret) error {
r.Log.Info("SSH keys added to migration secret", "secret", secret.Name)
return nil
}

// isPVCExistsInList reports whether pvc is already present in pvcList.
// Matching is done by the stable identifier from
// ResolvePersistentVolumeClaimIdentifier (the normalized import backing-file
// annotation) rather than by object name, since migration PVCs may carry
// generated names.
func (r *Builder) isPVCExistsInList(pvc *core.PersistentVolumeClaim, pvcList *core.PersistentVolumeClaimList) bool {
	// Hoist the candidate's identifier out of the loop: it is loop-invariant,
	// and the original recomputed it on every iteration.
	id := r.ResolvePersistentVolumeClaimIdentifier(pvc)
	// Index instead of ranging by value to avoid copying each
	// PersistentVolumeClaim struct per iteration.
	for i := range pvcList.Items {
		if id == r.ResolvePersistentVolumeClaimIdentifier(&pvcList.Items[i]) {
			return true
		}
	}
	return false
}
16 changes: 13 additions & 3 deletions pkg/controller/plan/kubevirt.go
Original file line number Diff line number Diff line change
Expand Up @@ -1399,16 +1399,26 @@ func (r *KubeVirt) dataVolumes(vm *plan.VMStatus, secret *core.Secret, configMap
// or vSphere, and in the latter case the conversion pod acts as the first-consumer
annotations[planbase.AnnBindImmediate] = "true"
}
if r.Plan.Spec.Warm && r.Builder.SupportsVolumePopulators(vm.Ref) {
// For storage offload, tie DataVolume to pre-imported PVC
annotations[planbase.AnnAllowClaimAdoption] = "true"
annotations[planbase.AnnPrePopulated] = "true"
}
// Do not delete the DV when the import completes as we check the DV to get the current
// disk transfer status.
annotations[AnnDeleteAfterCompletion] = "false"
dvTemplate := cdi.DataVolume{
ObjectMeta: meta.ObjectMeta{
Namespace: r.Plan.Spec.TargetNamespace,
Annotations: annotations,
GenerateName: r.getGeneratedName(vm),
Namespace: r.Plan.Spec.TargetNamespace,
Annotations: annotations,
},
}
if !(r.Builder.SupportsVolumePopulators(vm.Ref) && r.Plan.Spec.Warm) {
// For storage offload warm migrations, the template should have already
// been applied to the PVC that will be adopted by this DataVolume, so
// only add generateName for other migration types.
dvTemplate.ObjectMeta.GenerateName = r.getGeneratedName(vm)
}
dvTemplate.Labels = r.vmLabels(vm.Ref)

dataVolumes, err = r.Builder.DataVolumes(vm.Ref, secret, configMap, &dvTemplate, vddkConfigMap)
Expand Down
Loading
Loading