diff --git a/Taskfile.yaml b/Taskfile.yaml index e193b2b1d7..93eef4dcee 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -215,8 +215,7 @@ tasks: "ports": [ { "containerPort": 2345, "name": "dlv" } ], "readinessProbe": null, "livenessProbe": null, - "command": null, - "args": [] + "command": null }, { "name": "proxy", diff --git a/build/components/versions.yml b/build/components/versions.yml index f8bd000088..906d683c8a 100644 --- a/build/components/versions.yml +++ b/build/components/versions.yml @@ -3,7 +3,7 @@ firmware: libvirt: v10.9.0 edk2: stable202411 core: - 3p-kubevirt: v1.3.1-v12n.24 + 3p-kubevirt: v1.3.1-v12n.25 3p-containerized-data-importer: v1.60.3-v12n.12 distribution: 2.8.3 package: diff --git a/images/virt-handler/debug/dlv.Dockerfile b/images/virt-handler/debug/dlv.Dockerfile index 874864081d..12142d5178 100644 --- a/images/virt-handler/debug/dlv.Dockerfile +++ b/images/virt-handler/debug/dlv.Dockerfile @@ -33,14 +33,19 @@ RUN go mod edit -go=$GOVERSION && \ RUN go mod vendor +RUN for p in patches/*.patch ; do \ + echo -n "Apply ${p} ... " ; \
+ git apply --ignore-space-change --ignore-whitespace ${p} && echo OK || { echo FAIL ; exit 1 ; } ; \ + done + ENV GO111MODULE=on ENV GOOS=linux ENV CGO_ENABLED=1 ENV GOARCH=amd64 -RUN go build -o /kubevirt-binaries/virt-handler ./cmd/virt-handler/ +RUN go build -gcflags="all=-N -l" -o /kubevirt-binaries/virt-handler ./cmd/virt-handler/ RUN gcc -static cmd/container-disk-v2alpha/main.c -o /kubevirt-binaries/container-disk -RUN go build -o /kubevirt-binaries/virt-chroot ./cmd/virt-chroot/ +RUN go build -gcflags="all=-N -l" -o /kubevirt-binaries/virt-chroot ./cmd/virt-chroot/ FROM basealt @@ -48,7 +53,7 @@ RUN apt-get update && apt-get install --yes \ acl \ procps \ nftables \ - qemu-img==9.0.2-alt3 \ + qemu-img==9.1.2-alt1 \ xorriso==1.5.6-alt1 && \ apt-get clean && \ rm --recursive --force /var/lib/apt/lists/ftp.altlinux.org* /var/cache/apt/*.bin diff --git a/test/e2e/legacy/testdata/vm-restore-safe/vm/base/kustomization.yaml b/test/e2e/legacy/testdata/vm-restore-safe/vm/base/kustomization.yaml index 24e6c5846c..7eb3b5f7a5 100644 --- a/test/e2e/legacy/testdata/vm-restore-safe/vm/base/kustomization.yaml +++ b/test/e2e/legacy/testdata/vm-restore-safe/vm/base/kustomization.yaml @@ -4,8 +4,8 @@ resources: - ./vm.yaml - ./vd-root.yaml - ./vd-blank.yaml + - ./vmbda-vd.yaml # When vmbda is deleted, it may stay in Terminating; a fix is planned. 
-# - ./vmbda-vd.yaml # - ./vmbda-vi.yaml configurations: - transformer.yaml diff --git a/test/e2e/legacy/vm_restore_force.go b/test/e2e/legacy/vm_restore_force.go index 96235f6ae0..b33e62e0c9 100644 --- a/test/e2e/legacy/vm_restore_force.go +++ b/test/e2e/legacy/vm_restore_force.go @@ -174,27 +174,27 @@ var _ = Describe("VirtualMachineRestoreForce", Ordered, func() { }) }) - // By("Attaching `VirtualDisk` after `VirtualMachine` snapshotting", func() { - // for i, vm := range vms.Items { - // vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) - // newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) - // CreateResource(ctx, newDisk) - // newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) - // CreateResource(ctx, newVmbda) - // - // WaitPhaseByLabel( - // v1alpha2.VirtualMachineBlockDeviceAttachmentResource, - // string(v1alpha2.BlockDeviceAttachmentPhaseAttached), - // kc.WaitOptions{ - // Namespace: vm.Namespace, - // Labels: additionalDiskLabel, - // Timeout: LongWaitDuration, - // }) - // err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) - // Expect(err).NotTo(HaveOccurred()) - // Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) - // } - // }) + By("Attaching `VirtualDisk` after `VirtualMachine` snapshotting", func() { + for i, vm := range vms.Items { + vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) + newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) + CreateResource(ctx, newDisk) + newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) + CreateResource(ctx, newVmbda) + + WaitPhaseByLabel( + 
v1alpha2.VirtualMachineBlockDeviceAttachmentResource, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), + kc.WaitOptions{ + Namespace: vm.Namespace, + Labels: additionalDiskLabel, + Timeout: LongWaitDuration, + }) + err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) + } + }) By("Creating `VirtualMachineRestores`", func() { vmsnapshots := &v1alpha2.VirtualMachineSnapshotList{} @@ -295,12 +295,12 @@ var _ = Describe("VirtualMachineRestoreForce", Ordered, func() { // Expect(vd.Labels).To(HaveKeyWithValue(testLabelKey, testLabelValue)) } - // if bd.VirtualMachineBlockDeviceAttachmentName != "" { - // vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} - // err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) - // Expect(err).NotTo(HaveOccurred()) - // Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) - // } + if bd.VirtualMachineBlockDeviceAttachmentName != "" { + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} + err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) + } } } }) diff --git a/test/e2e/legacy/vm_restore_safe.go b/test/e2e/legacy/vm_restore_safe.go index 4087ebf4e4..4bad7a052a 100644 --- a/test/e2e/legacy/vm_restore_safe.go +++ b/test/e2e/legacy/vm_restore_safe.go @@ -23,6 +23,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -152,28 +153,27 @@ var _ = Describe("VirtualMachineRestoreSafe", Ordered, func() { }) }) - // Disable dut to the issue with virtual disk restoration because of VMBDA. - // By("Attaching `VirtualDisk` after `VirtualMachine` snapshotting", func() { - // for i, vm := range vms.Items { - // vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) - // newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) - // CreateResource(ctx, newDisk) - // newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) - // CreateResource(ctx, newVmbda) - // - // WaitPhaseByLabel( - // v1alpha2.VirtualMachineBlockDeviceAttachmentResource, - // string(v1alpha2.BlockDeviceAttachmentPhaseAttached), - // kc.WaitOptions{ - // Namespace: vm.Namespace, - // Labels: additionalDiskLabel, - // Timeout: LongWaitDuration, - // }) - // err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) - // Expect(err).NotTo(HaveOccurred()) - // Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) - // } - // }) + By("Attaching `VirtualDisk` after `VirtualMachine` snapshotting", func() { + for i, vm := range vms.Items { + vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) + newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) + CreateResource(ctx, newDisk) + newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) + CreateResource(ctx, newVmbda) + + WaitPhaseByLabel( + 
v1alpha2.VirtualMachineBlockDeviceAttachmentResource, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), + kc.WaitOptions{ + Namespace: vm.Namespace, + Labels: additionalDiskLabel, + Timeout: LongWaitDuration, + }) + err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) + } + }) By("Deleting `VirtualMachines` and their resources for `Safe` restoring", func() { result := kubectl.Delete(kc.DeleteOptions{ @@ -300,13 +300,12 @@ var _ = Describe("VirtualMachineRestoreSafe", Ordered, func() { // Expect(vd.Labels).To(HaveKeyWithValue(testLabelKey, testLabelValue)) } - // Disable dut to the issue with virtual disk restoration because of VMBDA. - // if bd.VirtualMachineBlockDeviceAttachmentName != "" { - // vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} - // err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) - // Expect(err).NotTo(HaveOccurred()) - // Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) - // } + if bd.VirtualMachineBlockDeviceAttachmentName != "" { + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} + err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) + } } } })