diff --git a/extensions/api/v1alpha1/sandboxclaim_types.go b/extensions/api/v1alpha1/sandboxclaim_types.go index 725eeea76..b4509e071 100644 --- a/extensions/api/v1alpha1/sandboxclaim_types.go +++ b/extensions/api/v1alpha1/sandboxclaim_types.go @@ -95,6 +95,21 @@ type SandboxTemplateRef struct { Name string `json:"name,omitempty" protobuf:"bytes,1,name=name"` } +// WorkspaceResources defines per-claim resource overrides for the workspace container. +type WorkspaceResources struct { + // CPUMillicores is the desired CPU request/limit for the workspace container. + // +optional + CPUMillicores int32 `json:"cpuMillicores,omitempty"` + + // MemoryMB is the desired memory request/limit for the workspace container. + // +optional + MemoryMB int32 `json:"memoryMB,omitempty"` + + // DiskGB is the desired ephemeral-storage request/limit for the workspace container. + // +optional + DiskGB int32 `json:"diskGB,omitempty"` +} + // SandboxClaimSpec defines the desired state of Sandbox type SandboxClaimSpec struct { // sandboxTemplateRef defines the name of the SandboxTemplate to be used for creating a Sandbox. @@ -112,6 +127,10 @@ type SandboxClaimSpec struct { // +optional // +kubebuilder:default=default WarmPool *WarmPoolPolicy `json:"warmpool,omitempty"` + + // WorkspaceResources overrides resource requests/limits for the workspace container at claim time. + // +optional + WorkspaceResources *WorkspaceResources `json:"workspaceResources,omitempty"` } // SandboxClaimStatus defines the observed state of Sandbox. 
diff --git a/extensions/api/v1alpha1/zz_generated.deepcopy.go b/extensions/api/v1alpha1/zz_generated.deepcopy.go index d02750122..8633e4bac 100644 --- a/extensions/api/v1alpha1/zz_generated.deepcopy.go +++ b/extensions/api/v1alpha1/zz_generated.deepcopy.go @@ -131,6 +131,11 @@ func (in *SandboxClaimSpec) DeepCopyInto(out *SandboxClaimSpec) { *out = new(WarmPoolPolicy) **out = **in } + if in.WorkspaceResources != nil { + in, out := &in.WorkspaceResources, &out.WorkspaceResources + *out = new(WorkspaceResources) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SandboxClaimSpec. @@ -380,3 +385,18 @@ func (in *SandboxWarmPoolStatus) DeepCopy() *SandboxWarmPoolStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceResources) DeepCopyInto(out *WorkspaceResources) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceResources. 
+func (in *WorkspaceResources) DeepCopy() *WorkspaceResources { + if in == nil { + return nil + } + out := new(WorkspaceResources) + in.DeepCopyInto(out) + return out +} diff --git a/extensions/controllers/sandboxclaim_controller.go b/extensions/controllers/sandboxclaim_controller.go index 4447aec79..776ac7837 100644 --- a/extensions/controllers/sandboxclaim_controller.go +++ b/extensions/controllers/sandboxclaim_controller.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" k8errors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" @@ -376,8 +377,154 @@ func (r *SandboxClaimReconciler) computeAndSetStatus(claim *extensionsv1alpha1.S } } +func applyWorkspaceResourceOverrides(container *corev1.Container, overrides *extensionsv1alpha1.WorkspaceResources) { + if overrides == nil { + return + } + if container.Resources.Requests == nil { + container.Resources.Requests = corev1.ResourceList{} + } + if container.Resources.Limits == nil { + container.Resources.Limits = corev1.ResourceList{} + } + if overrides.CPUMillicores > 0 { + qty := *resource.NewMilliQuantity(int64(overrides.CPUMillicores), resource.DecimalSI) + container.Resources.Requests[corev1.ResourceCPU] = qty + container.Resources.Limits[corev1.ResourceCPU] = qty + } + if overrides.MemoryMB > 0 { + qty := *resource.NewQuantity(int64(overrides.MemoryMB)*1024*1024, resource.BinarySI) + container.Resources.Requests[corev1.ResourceMemory] = qty + container.Resources.Limits[corev1.ResourceMemory] = qty + } + if overrides.DiskGB > 0 { + qty := *resource.NewQuantity(int64(overrides.DiskGB)*1024*1024*1024, resource.BinarySI) + container.Resources.Requests[corev1.ResourceEphemeralStorage] = qty + container.Resources.Limits[corev1.ResourceEphemeralStorage] = qty + } +} + +func applyClaimWorkspaceResourcesToPodSpec(spec 
*corev1.PodSpec, claim *extensionsv1alpha1.SandboxClaim) { + if claim.Spec.WorkspaceResources == nil { + return + } + for i := range spec.Containers { + container := &spec.Containers[i] + if container.Name != "workspace" { + continue + } + applyWorkspaceResourceOverrides(container, claim.Spec.WorkspaceResources) + } +} + +// reconcileWorkspaceResources patches the pod's workspace container resources +// in-place if the claim's WorkspaceResources differ from the pod's current values. +// This triggers Kubernetes InPlacePodVerticalScaling (K8s 1.27+). +func (r *SandboxClaimReconciler) reconcileWorkspaceResources(ctx context.Context, sandbox *v1alpha1.Sandbox, claim *extensionsv1alpha1.SandboxClaim) error { + if claim.Spec.WorkspaceResources == nil { + return nil + } + logger := log.FromContext(ctx) + + // Find the pod owned by this sandbox. + pod := &corev1.Pod{} + if err := r.Get(ctx, client.ObjectKey{Namespace: sandbox.Namespace, Name: sandbox.Name}, pod); err != nil { + return client.IgnoreNotFound(err) // Pod may not exist yet (still starting); surface other Get errors. + } + if pod.Status.Phase != corev1.PodRunning { + return nil // Only resize running pods. + } + + for i, c := range pod.Spec.Containers { + if c.Name != "workspace" { + continue + } + patch := buildResizePatch(c.Resources, claim.Spec.WorkspaceResources) + if patch == nil { + return nil // No change needed. + } + + // Patch pod resources in-place. On K8s 1.27+ with InPlacePodVerticalScaling, + // the kubelet detects the resource change and calls UpdateContainerResources + // on the container runtime (e.g., isol8-runtime update). 
+ podPatch := pod.DeepCopy() + podPatch.Spec.Containers[i].Resources = *patch + if err := r.Patch(ctx, podPatch, client.StrategicMergeFrom(pod)); err != nil { + return fmt.Errorf("patch pod resources: %w", err) + } + logger.Info("resized workspace container", "pod", pod.Name, + "cpuMillicores", claim.Spec.WorkspaceResources.CPUMillicores, + "memoryMB", claim.Spec.WorkspaceResources.MemoryMB) + return nil + } + return nil +} + +// buildResizePatch compares current container resources with the desired +// WorkspaceResources and returns updated ResourceRequirements if they differ. +// Returns nil if no change is needed. DiskGB is intentionally excluded +// because ephemeral storage cannot be resized in-place. +func buildResizePatch(current corev1.ResourceRequirements, desired *extensionsv1alpha1.WorkspaceResources) *corev1.ResourceRequirements { + target := corev1.ResourceRequirements{ + Requests: current.Requests.DeepCopy(), + Limits: current.Limits.DeepCopy(), + } + if target.Requests == nil { + target.Requests = corev1.ResourceList{} + } + if target.Limits == nil { + target.Limits = corev1.ResourceList{} + } + changed := false + + if desired.CPUMillicores > 0 { + qty := *resource.NewMilliQuantity(int64(desired.CPUMillicores), resource.DecimalSI) + if !current.Limits[corev1.ResourceCPU].Equal(qty) || !current.Requests[corev1.ResourceCPU].Equal(qty) { + target.Requests[corev1.ResourceCPU] = qty + target.Limits[corev1.ResourceCPU] = qty + changed = true + } + } + if desired.MemoryMB > 0 { + qty := *resource.NewQuantity(int64(desired.MemoryMB)*1024*1024, resource.BinarySI) + if !current.Limits[corev1.ResourceMemory].Equal(qty) || !current.Requests[corev1.ResourceMemory].Equal(qty) { + target.Requests[corev1.ResourceMemory] = qty + target.Limits[corev1.ResourceMemory] = qty + changed = true + } + } + + if !changed { + return nil + } + return &target +} + +func mergeTemplatePodMetadata(target *v1alpha1.PodMetadata, template v1alpha1.PodMetadata) { + if len(template.Labels) > 0 { + if target.Labels == nil { + target.Labels = make(map[string]string, len(template.Labels)) + } + 
for k, v := range template.Labels { + if _, exists := target.Labels[k]; !exists { + target.Labels[k] = v + } + } + } + if len(template.Annotations) > 0 { + if target.Annotations == nil { + target.Annotations = make(map[string]string, len(template.Annotations)) + } + for k, v := range template.Annotations { + if _, exists := target.Annotations[k]; !exists { + target.Annotations[k] = v + } + } + } +} + // adoptSandboxFromCandidates picks the best candidate and transfers ownership to the claim. -func (r *SandboxClaimReconciler) adoptSandboxFromCandidates(ctx context.Context, claim *extensionsv1alpha1.SandboxClaim, candidates []*v1alpha1.Sandbox) (*v1alpha1.Sandbox, error) { +func (r *SandboxClaimReconciler) adoptSandboxFromCandidates(ctx context.Context, claim *extensionsv1alpha1.SandboxClaim, template *extensionsv1alpha1.SandboxTemplate, candidates []*v1alpha1.Sandbox) (*v1alpha1.Sandbox, error) { logger := log.FromContext(ctx) // Sort: ready sandboxes first, then by creation time (oldest first) @@ -441,11 +588,16 @@ func (r *SandboxClaimReconciler) adoptSandboxFromCandidates(ctx context.Context, adopted.Annotations[asmetrics.TraceContextAnnotation] = tc } + if template != nil { + mergeTemplatePodMetadata(&adopted.Spec.PodTemplate.ObjectMeta, template.Spec.PodTemplate.ObjectMeta) + } + // Add sandbox ID label to pod template for NetworkPolicy targeting if adopted.Spec.PodTemplate.ObjectMeta.Labels == nil { adopted.Spec.PodTemplate.ObjectMeta.Labels = make(map[string]string) } adopted.Spec.PodTemplate.ObjectMeta.Labels[extensionsv1alpha1.SandboxIDLabel] = string(claim.UID) + applyClaimWorkspaceResourcesToPodSpec(&adopted.Spec.PodTemplate.Spec, claim) // Update uses optimistic concurrency (resourceVersion) so concurrent // claims racing to adopt the same sandbox will conflict and retry. 
@@ -520,6 +672,7 @@ func (r *SandboxClaimReconciler) createSandbox(ctx context.Context, claim *exten sandbox.Annotations[v1alpha1.SandboxTemplateRefAnnotation] = template.Name template.Spec.PodTemplate.DeepCopyInto(&sandbox.Spec.PodTemplate) + applyClaimWorkspaceResourcesToPodSpec(&sandbox.Spec.PodTemplate.Spec, claim) // TODO: this is a workaround, remove replica assignment related issue #202 replicas := int32(1) sandbox.Spec.Replicas = &replicas @@ -604,12 +757,15 @@ func (r *SandboxClaimReconciler) getOrCreateSandbox(ctx context.Context, claim * } if sandbox != nil { - logger.Info("sandbox already exists, skipping update", "name", sandbox.Name) if !metav1.IsControlledBy(sandbox, claim) { err := fmt.Errorf("sandbox %q is not controlled by claim %q. Please use a different claim name or delete the sandbox manually", sandbox.Name, claim.Name) logger.Error(err, "Sandbox controller mismatch") return nil, err } + // Reconcile workspace resources on the existing pod (in-place resize). + if err := r.reconcileWorkspaceResources(ctx, sandbox, claim); err != nil { + logger.Error(err, "failed to reconcile workspace resources") + } return sandbox, nil } @@ -673,7 +829,12 @@ func (r *SandboxClaimReconciler) getOrCreateSandbox(ctx context.Context, claim * // Try to adopt from warm pool if len(adoptionCandidates) > 0 { logger.V(1).Info("Found warm pool adoption candidates", "count", len(adoptionCandidates), "claim", claim.Name, "warmpool", policy) - adopted, err := r.adoptSandboxFromCandidates(ctx, claim, adoptionCandidates) + var template *extensionsv1alpha1.SandboxTemplate + template, err := r.getTemplate(ctx, claim) + if err != nil && !k8errors.IsNotFound(err) { + return nil, err + } + adopted, err := r.adoptSandboxFromCandidates(ctx, claim, template, adoptionCandidates) if err != nil { return nil, err } diff --git a/extensions/controllers/sandboxclaim_controller_test.go b/extensions/controllers/sandboxclaim_controller_test.go index 26932a9f8..4a71dc826 100644 --- 
a/extensions/controllers/sandboxclaim_controller_test.go +++ b/extensions/controllers/sandboxclaim_controller_test.go @@ -27,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" k8errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -1132,6 +1133,299 @@ func TestSandboxClaimNoReAdoption(t *testing.T) { } } +func TestSandboxClaimCreateAppliesWorkspaceResources(t *testing.T) { + scheme := newScheme(t) + + template := &extensionsv1alpha1.SandboxTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "test-template", Namespace: "default"}, + Spec: extensionsv1alpha1.SandboxTemplateSpec{ + PodTemplate: sandboxv1alpha1.PodTemplate{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "workspace", Image: "workspace:latest"}, + {Name: "extra-sidecar", Image: "sidecar:latest"}, + }, + }, + }, + }, + } + + claim := &extensionsv1alpha1.SandboxClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "test-claim", Namespace: "default", UID: "claim-uid"}, + Spec: extensionsv1alpha1.SandboxClaimSpec{ + TemplateRef: extensionsv1alpha1.SandboxTemplateRef{Name: "test-template"}, + WorkspaceResources: &extensionsv1alpha1.WorkspaceResources{ + CPUMillicores: 2000, + MemoryMB: 4096, + DiskGB: 20, + }, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(template, claim). + WithStatusSubresource(claim). 
+ Build() + + reconciler := &SandboxClaimReconciler{ + Client: fakeClient, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + Tracer: asmetrics.NewNoOp(), + } + + req := reconcile.Request{NamespacedName: types.NamespacedName{Name: claim.Name, Namespace: claim.Namespace}} + if _, err := reconciler.Reconcile(context.Background(), req); err != nil { + t.Fatalf("reconcile failed: %v", err) + } + + var sandbox sandboxv1alpha1.Sandbox + if err := fakeClient.Get(context.Background(), req.NamespacedName, &sandbox); err != nil { + t.Fatalf("failed to get created sandbox: %v", err) + } + + var workspace, sidecar *corev1.Container + for i := range sandbox.Spec.PodTemplate.Spec.Containers { + container := &sandbox.Spec.PodTemplate.Spec.Containers[i] + switch container.Name { + case "workspace": + workspace = container + case "extra-sidecar": + sidecar = container + } + } + if workspace == nil { + t.Fatal("workspace container not found in created sandbox") + } + if sidecar == nil { + t.Fatal("sidecar container not found in created sandbox") + } + + if got := workspace.Resources.Requests[corev1.ResourceCPU]; got.Cmp(resource.MustParse("2000m")) != 0 { + t.Fatalf("expected workspace CPU request 2000m, got %s", got.String()) + } + if got := workspace.Resources.Limits[corev1.ResourceCPU]; got.Cmp(resource.MustParse("2000m")) != 0 { + t.Fatalf("expected workspace CPU limit 2000m, got %s", got.String()) + } + if got := workspace.Resources.Requests[corev1.ResourceMemory]; got.Cmp(resource.MustParse("4096Mi")) != 0 { + t.Fatalf("expected workspace memory request 4096Mi, got %s", got.String()) + } + if got := workspace.Resources.Limits[corev1.ResourceMemory]; got.Cmp(resource.MustParse("4096Mi")) != 0 { + t.Fatalf("expected workspace memory limit 4096Mi, got %s", got.String()) + } + if got := workspace.Resources.Requests[corev1.ResourceEphemeralStorage]; got.Cmp(resource.MustParse("20Gi")) != 0 { + t.Fatalf("expected workspace disk request 20Gi, got %s", got.String()) + } + if 
got := workspace.Resources.Limits[corev1.ResourceEphemeralStorage]; got.Cmp(resource.MustParse("20Gi")) != 0 { + t.Fatalf("expected workspace disk limit 20Gi, got %s", got.String()) + } + if len(sidecar.Resources.Requests) != 0 || len(sidecar.Resources.Limits) != 0 { + t.Fatalf("expected sidecar resources to remain untouched, got requests=%v limits=%v", sidecar.Resources.Requests, sidecar.Resources.Limits) + } +} + +func TestSandboxClaimCreateIgnoresWorkspaceResourcesWithoutWorkspaceContainer(t *testing.T) { + scheme := newScheme(t) + + template := &extensionsv1alpha1.SandboxTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "test-template", Namespace: "default"}, + Spec: extensionsv1alpha1.SandboxTemplateSpec{ + PodTemplate: sandboxv1alpha1.PodTemplate{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "pause", Image: "registry.k8s.io/pause:3.10"}, + }, + }, + }, + }, + } + + claim := &extensionsv1alpha1.SandboxClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "test-claim", Namespace: "default", UID: "claim-uid"}, + Spec: extensionsv1alpha1.SandboxClaimSpec{ + TemplateRef: extensionsv1alpha1.SandboxTemplateRef{Name: "test-template"}, + WorkspaceResources: &extensionsv1alpha1.WorkspaceResources{ + CPUMillicores: 2000, + MemoryMB: 4096, + DiskGB: 20, + }, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(template, claim). + WithStatusSubresource(claim). 
+ Build() + + reconciler := &SandboxClaimReconciler{ + Client: fakeClient, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + Tracer: asmetrics.NewNoOp(), + } + + req := reconcile.Request{NamespacedName: types.NamespacedName{Name: claim.Name, Namespace: claim.Namespace}} + if _, err := reconciler.Reconcile(context.Background(), req); err != nil { + t.Fatalf("reconcile failed: %v", err) + } + + var sandbox sandboxv1alpha1.Sandbox + if err := fakeClient.Get(context.Background(), req.NamespacedName, &sandbox); err != nil { + t.Fatalf("failed to get created sandbox: %v", err) + } + + if len(sandbox.Spec.PodTemplate.Spec.Containers) != 1 { + t.Fatalf("expected one container, got %d", len(sandbox.Spec.PodTemplate.Spec.Containers)) + } + container := sandbox.Spec.PodTemplate.Spec.Containers[0] + if container.Name != "pause" { + t.Fatalf("expected pause container, got %q", container.Name) + } + if len(container.Resources.Requests) != 0 || len(container.Resources.Limits) != 0 { + t.Fatalf("expected non-workspace container resources to remain untouched, got requests=%v limits=%v", container.Resources.Requests, container.Resources.Limits) + } +} + +func TestSandboxClaimAdoptionAppliesWorkspaceResources(t *testing.T) { + scheme := newScheme(t) + + template := &extensionsv1alpha1.SandboxTemplate{ + ObjectMeta: metav1.ObjectMeta{Name: "test-template", Namespace: "default"}, + Spec: extensionsv1alpha1.SandboxTemplateSpec{ + PodTemplate: sandboxv1alpha1.PodTemplate{ + ObjectMeta: sandboxv1alpha1.PodMetadata{ + Annotations: map[string]string{ + "example.com/workspace": "true", + "test-annotation": "template", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "workspace", Image: "workspace:latest"}, + {Name: "extra-sidecar", Image: "sidecar:latest"}, + }, + }, + }, + }, + } + + claim := &extensionsv1alpha1.SandboxClaim{ + ObjectMeta: metav1.ObjectMeta{Name: "test-claim", Namespace: "default", UID: "claim-uid"}, + Spec: 
extensionsv1alpha1.SandboxClaimSpec{ + TemplateRef: extensionsv1alpha1.SandboxTemplateRef{Name: "test-template"}, + WorkspaceResources: &extensionsv1alpha1.WorkspaceResources{ + CPUMillicores: 2000, + MemoryMB: 4096, + DiskGB: 20, + }, + }, + } + + warmSandbox := &sandboxv1alpha1.Sandbox{ + ObjectMeta: metav1.ObjectMeta{ + Name: "warm-sb", + Namespace: "default", + Labels: map[string]string{ + warmPoolSandboxLabel: sandboxcontrollers.NameHash("test-pool"), + sandboxTemplateRefHash: sandboxcontrollers.NameHash("test-template"), + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "extensions.agents.x-k8s.io/v1alpha1", + Kind: "SandboxWarmPool", + Name: "test-pool", + UID: "pool-uid", + Controller: ptr.To(true), + }, + }, + }, + Spec: sandboxv1alpha1.SandboxSpec{ + Replicas: ptr.To(int32(1)), + PodTemplate: sandboxv1alpha1.PodTemplate{ + ObjectMeta: sandboxv1alpha1.PodMetadata{ + Annotations: map[string]string{ + "example.com/workspace": "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "workspace", Image: "workspace:latest"}, + {Name: "extra-sidecar", Image: "sidecar:latest"}, + }, + }, + }, + }, + Status: sandboxv1alpha1.SandboxStatus{ + Conditions: []metav1.Condition{{ + Type: string(sandboxv1alpha1.SandboxConditionReady), + Status: metav1.ConditionTrue, + Reason: "Ready", + }}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(template, claim, warmSandbox). + WithStatusSubresource(claim). 
+ Build() + + reconciler := &SandboxClaimReconciler{ + Client: fakeClient, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + Tracer: asmetrics.NewNoOp(), + } + + req := reconcile.Request{NamespacedName: types.NamespacedName{Name: claim.Name, Namespace: claim.Namespace}} + if _, err := reconciler.Reconcile(context.Background(), req); err != nil { + t.Fatalf("reconcile failed: %v", err) + } + + var adopted sandboxv1alpha1.Sandbox + if err := fakeClient.Get(context.Background(), types.NamespacedName{Name: "warm-sb", Namespace: "default"}, &adopted); err != nil { + t.Fatalf("failed to get adopted sandbox: %v", err) + } + + var workspace, sidecar *corev1.Container + for i := range adopted.Spec.PodTemplate.Spec.Containers { + container := &adopted.Spec.PodTemplate.Spec.Containers[i] + switch container.Name { + case "workspace": + workspace = container + case "extra-sidecar": + sidecar = container + } + } + if workspace == nil { + t.Fatal("workspace container not found in adopted sandbox") + } + if sidecar == nil { + t.Fatal("sidecar container not found in adopted sandbox") + } + + if got := workspace.Resources.Requests[corev1.ResourceCPU]; got.Cmp(resource.MustParse("2000m")) != 0 { + t.Fatalf("expected adopted workspace CPU request 2000m, got %s", got.String()) + } + if got := workspace.Resources.Requests[corev1.ResourceMemory]; got.Cmp(resource.MustParse("4096Mi")) != 0 { + t.Fatalf("expected adopted workspace memory request 4096Mi, got %s", got.String()) + } + if got := workspace.Resources.Requests[corev1.ResourceEphemeralStorage]; got.Cmp(resource.MustParse("20Gi")) != 0 { + t.Fatalf("expected adopted workspace disk request 20Gi, got %s", got.String()) + } + if len(sidecar.Resources.Requests) != 0 || len(sidecar.Resources.Limits) != 0 { + t.Fatalf("expected adopted sidecar resources to remain untouched, got requests=%v limits=%v", sidecar.Resources.Requests, sidecar.Resources.Limits) + } + if got := 
adopted.Spec.PodTemplate.ObjectMeta.Annotations["example.com/workspace"]; got != "true" { + t.Fatalf("expected adopted workspace annotation to survive, got %q", got) + } + if got := adopted.Spec.PodTemplate.ObjectMeta.Annotations["test-annotation"]; got != "template" { + t.Fatalf("expected template annotation to be restored on adoption, got %q", got) + } +} + func TestRecordCreationLatencyMetric(t *testing.T) { ctx := context.Background() pastTime := metav1.Time{Time: time.Now().Add(-10 * time.Second)} diff --git a/k8s/crds/extensions.agents.x-k8s.io_sandboxclaims.yaml b/k8s/crds/extensions.agents.x-k8s.io_sandboxclaims.yaml index b7615afe0..20b1ed801 100644 --- a/k8s/crds/extensions.agents.x-k8s.io_sandboxclaims.yaml +++ b/k8s/crds/extensions.agents.x-k8s.io_sandboxclaims.yaml @@ -51,6 +51,18 @@ spec: warmpool: default: default type: string + workspaceResources: + properties: + cpuMillicores: + format: int32 + type: integer + diskGB: + format: int32 + type: integer + memoryMB: + format: int32 + type: integer + type: object required: - sandboxTemplateRef type: object