From 3bfd3abe32f0010a62878dd2920a5cbe67701a65 Mon Sep 17 00:00:00 2001 From: Karthik K N Date: Tue, 17 Feb 2026 09:40:49 +0000 Subject: [PATCH 1/2] Implement devmachinepool --- .../api/v1beta2/devmachinepool_types.go | 153 +++++ .../v1beta2/devmachinepooltemplate_types.go | 64 ++ .../api/v1beta2/zz_generated.deepcopy.go | 240 +++++++ ...ture.cluster.x-k8s.io_devmachinepools.yaml | 256 ++++++++ ...ster.x-k8s.io_devmachinepooltemplates.yaml | 147 +++++ .../docker/config/crd/kustomization.yaml | 2 + .../docker/config/rbac/role.yaml | 3 + .../docker/controllers/alias.go | 18 + .../backends/docker/dockercluster_backend.go | 2 +- .../docker/dockermachinepool_backend.go | 617 ++++++++++++++++++ .../controllers/backends/machinepool.go | 34 + .../controllers/devmachinepool_controller.go | 249 +++++++ test/infrastructure/docker/main.go | 11 + .../templates/cluster-template-dev-mp.yaml | 23 + .../clusterclass-dev-quick-start.yaml | 146 +++++ 15 files changed, 1964 insertions(+), 1 deletion(-) create mode 100644 test/infrastructure/docker/api/v1beta2/devmachinepool_types.go create mode 100644 test/infrastructure/docker/api/v1beta2/devmachinepooltemplate_types.go create mode 100644 test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml create mode 100644 test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml create mode 100644 test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go create mode 100644 test/infrastructure/docker/internal/controllers/backends/machinepool.go create mode 100644 test/infrastructure/docker/internal/controllers/devmachinepool_controller.go create mode 100644 test/infrastructure/docker/templates/cluster-template-dev-mp.yaml create mode 100644 test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml diff --git a/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go 
b/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go new file mode 100644 index 000000000000..2f4f9df4bf30 --- /dev/null +++ b/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go @@ -0,0 +1,153 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) + +const ( + // DevMachinePoolFinalizer allows ReconcileDevMachinePool to clean up resources. + DevMachinePoolFinalizer = "devmachinepool.infrastructure.cluster.x-k8s.io" +) + +const ( + // ReplicasReadyCondition reports an aggregate of current status of the replicas controlled by the MachinePool. + ReplicasReadyCondition string = "ReplicasReady" + + // ReplicasReadyReason surfaces when the DevMachinePool ReplicasReadyCondition is met. + ReplicasReadyReason string = clusterv1.ReadyReason +) + +// DevMachinePool's conditions that apply to all the supported backends. + +// DevMachinePool's Ready condition and corresponding reasons. +const ( + // DevMachinePoolReadyCondition is true if + // - The DevMachinePool is using a docker backend and ReplicasReadyCondition is true. + DevMachinePoolReadyCondition = clusterv1.ReadyCondition + + // DevMachinePoolReadyReason surfaces when the DevMachinePool readiness criteria is met. 
+ DevMachinePoolReadyReason = clusterv1.ReadyReason + + // DevMachinePoolNotReadyReason surfaces when the DevMachinePool readiness criteria is not met. + DevMachinePoolNotReadyReason = clusterv1.NotReadyReason + + // DevMachinePoolReadyUnknownReason surfaces when at least one DevMachinePool readiness criteria is unknown + // and no DevMachinePool readiness criteria is not met. + DevMachinePoolReadyUnknownReason = clusterv1.ReadyUnknownReason +) + +// +kubebuilder:resource:path=devmachinepools,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of DevMachinePool" + +// DevMachinePool is the Schema for the devmachinepools API. +type DevMachinePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DevMachinePoolSpec `json:"spec,omitempty"` + Status DevMachinePoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DevMachinePoolList contains a list of DevMachinePool. +type DevMachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DevMachinePool `json:"items"` +} + +// DevMachinePoolSpec defines the desired state of DevMachinePool. +type DevMachinePoolSpec struct { + // ProviderID is the identification ID of the Machine Pool + // +optional + ProviderID string `json:"providerID,omitempty"` + + // ProviderIDList is the list of identification IDs of machine instances managed by this Machine Pool + // +optional + ProviderIDList []string `json:"providerIDList,omitempty"` + + // Template contains the details used to build a replica machine within the Machine Pool + // +optional + Template DevMachinePoolBackendTemplate `json:"template"` +} + +// DevMachinePoolBackendTemplate defines backends for a DevMachinePool. 
+type DevMachinePoolBackendTemplate struct { + // docker defines a backend for a DevMachine using docker containers. + // +optional + Docker *DockerMachinePoolMachineTemplate `json:"docker,omitempty"` +} + +// DevMachinePoolStatus defines the observed state of DevMachinePool. +type DevMachinePoolStatus struct { + // conditions represents the observations of a DevMachinePool's current state. + // Known condition types are Ready, ReplicasReady, Resized. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Ready denotes that the machine pool is ready + // +optional + Ready bool `json:"ready"` + + // Replicas is the most recently observed number of replicas. + // +optional + Replicas int32 `json:"replicas"` + + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines. + // +optional + InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"` + + // Instances contains the status for each instance in the pool + // +optional + Instances []DevMachinePoolBackendInstanceStatus `json:"instances,omitempty"` +} + +// DevMachinePoolBackendInstanceStatus contains status information about a DevMachinePool instances. +type DevMachinePoolBackendInstanceStatus struct { + // docker define backend status for a DevMachine for a machine using docker containers. + // +optional + Docker *DockerMachinePoolInstanceStatus `json:"docker,omitempty"` +} + +// GetConditions returns the set of conditions for this object. +func (d *DevMachinePool) GetConditions() []metav1.Condition { + return d.Status.Conditions +} + +// SetConditions sets conditions for an API object. 
+func (d *DevMachinePool) SetConditions(conditions []metav1.Condition) { + d.Status.Conditions = conditions +} + +func init() { + objectTypes = append(objectTypes, &DevMachinePool{}, &DevMachinePoolList{}) +} diff --git a/test/infrastructure/docker/api/v1beta2/devmachinepooltemplate_types.go b/test/infrastructure/docker/api/v1beta2/devmachinepooltemplate_types.go new file mode 100644 index 000000000000..562d4d8cbc99 --- /dev/null +++ b/test/infrastructure/docker/api/v1beta2/devmachinepooltemplate_types.go @@ -0,0 +1,64 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=devmachinepooltemplates,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of DevMachinePoolTemplate" + +// DevMachinePoolTemplate is the Schema for the devmachinepooltemplates API. +type DevMachinePoolTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DevMachinePoolTemplateSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true + +// DevMachinePoolTemplateList contains a list of DevMachinePoolTemplate. 
+type DevMachinePoolTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DevMachinePoolTemplate `json:"items"` +} + +// DevMachinePoolTemplateSpec defines the desired state of DevMachinePoolTemplate. +type DevMachinePoolTemplateSpec struct { + Template DevMachinePoolTemplateResource `json:"template"` +} + +// DevMachinePoolTemplateResource describes the data needed to create a DevMachine from a template. +type DevMachinePoolTemplateResource struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec DevMachinePoolSpec `json:"spec"` +} + +func init() { + objectTypes = append(objectTypes, &DevMachinePoolTemplate{}, &DevMachinePoolTemplateList{}) +} diff --git a/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go b/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go index 59ccae81616d..e6bb4e2cba3c 100644 --- a/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go +++ b/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go @@ -491,6 +491,246 @@ func (in *DevMachineList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePool) DeepCopyInto(out *DevMachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePool. 
+func (in *DevMachinePool) DeepCopy() *DevMachinePool { + if in == nil { + return nil + } + out := new(DevMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DevMachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePoolBackendInstanceStatus) DeepCopyInto(out *DevMachinePoolBackendInstanceStatus) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = new(DockerMachinePoolInstanceStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolBackendInstanceStatus. +func (in *DevMachinePoolBackendInstanceStatus) DeepCopy() *DevMachinePoolBackendInstanceStatus { + if in == nil { + return nil + } + out := new(DevMachinePoolBackendInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePoolBackendTemplate) DeepCopyInto(out *DevMachinePoolBackendTemplate) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = new(DockerMachinePoolMachineTemplate) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolBackendTemplate. +func (in *DevMachinePoolBackendTemplate) DeepCopy() *DevMachinePoolBackendTemplate { + if in == nil { + return nil + } + out := new(DevMachinePoolBackendTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DevMachinePoolList) DeepCopyInto(out *DevMachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DevMachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolList. +func (in *DevMachinePoolList) DeepCopy() *DevMachinePoolList { + if in == nil { + return nil + } + out := new(DevMachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DevMachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePoolSpec) DeepCopyInto(out *DevMachinePoolSpec) { + *out = *in + if in.ProviderIDList != nil { + in, out := &in.ProviderIDList, &out.ProviderIDList + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolSpec. +func (in *DevMachinePoolSpec) DeepCopy() *DevMachinePoolSpec { + if in == nil { + return nil + } + out := new(DevMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DevMachinePoolStatus) DeepCopyInto(out *DevMachinePoolStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]DevMachinePoolBackendInstanceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolStatus. +func (in *DevMachinePoolStatus) DeepCopy() *DevMachinePoolStatus { + if in == nil { + return nil + } + out := new(DevMachinePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePoolTemplate) DeepCopyInto(out *DevMachinePoolTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolTemplate. +func (in *DevMachinePoolTemplate) DeepCopy() *DevMachinePoolTemplate { + if in == nil { + return nil + } + out := new(DevMachinePoolTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DevMachinePoolTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DevMachinePoolTemplateList) DeepCopyInto(out *DevMachinePoolTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DevMachinePoolTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolTemplateList. +func (in *DevMachinePoolTemplateList) DeepCopy() *DevMachinePoolTemplateList { + if in == nil { + return nil + } + out := new(DevMachinePoolTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DevMachinePoolTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePoolTemplateResource) DeepCopyInto(out *DevMachinePoolTemplateResource) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolTemplateResource. +func (in *DevMachinePoolTemplateResource) DeepCopy() *DevMachinePoolTemplateResource { + if in == nil { + return nil + } + out := new(DevMachinePoolTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevMachinePoolTemplateSpec) DeepCopyInto(out *DevMachinePoolTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolTemplateSpec. 
+func (in *DevMachinePoolTemplateSpec) DeepCopy() *DevMachinePoolTemplateSpec { + if in == nil { + return nil + } + out := new(DevMachinePoolTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DevMachineSpec) DeepCopyInto(out *DevMachineSpec) { *out = *in diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml new file mode 100644 index 000000000000..5b6a91f11546 --- /dev/null +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml @@ -0,0 +1,256 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.20.0 + name: devmachinepools.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: DevMachinePool + listKind: DevMachinePoolList + plural: devmachinepools + singular: devmachinepool + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Time duration since creation of DevMachinePool + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DevMachinePool is the Schema for the devmachinepools API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DevMachinePoolSpec defines the desired state of DevMachinePool. + properties: + providerID: + description: ProviderID is the identification ID of the Machine Pool + type: string + providerIDList: + description: ProviderIDList is the list of identification IDs of machine + instances managed by this Machine Pool + items: + type: string + type: array + template: + description: Template contains the details used to build a replica + machine within the Machine Pool + properties: + docker: + description: docker defines a backend for a DevMachine using docker + containers. + properties: + customImage: + description: |- + CustomImage allows customizing the container image that is used for + running the machine + type: string + extraMounts: + description: |- + ExtraMounts describes additional mount points for the node container + These may be used to bind a hostPath + items: + description: |- + Mount specifies a host volume to mount into a container. + This is a simplified version of kind v1alpha4.Mount types. + properties: + containerPath: + description: Path of the mount within the container. + type: string + hostPath: + description: |- + Path of the mount on the host. If the hostPath doesn't exist, then runtimes + should report error. If the hostpath is a symbolic link, runtimes should + follow the symlink and mount the real destination to container. + type: string + readOnly: + description: If set, the mount is read-only. + type: boolean + type: object + type: array + preLoadImages: + description: |- + PreLoadImages allows to pre-load images in a newly created machine. This can be used to + speed up tests by avoiding e.g. to download CNI images on all the containers. 
+ items: + type: string + type: array + type: object + type: object + type: object + status: + description: DevMachinePoolStatus defines the observed state of DevMachinePool. + properties: + conditions: + description: |- + conditions represents the observations of a DevMachinePool's current state. + Known condition types are Ready, ReplicasReady, Resized. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + infrastructureMachineKind: + description: InfrastructureMachineKind is the kind of the infrastructure + resources behind MachinePool Machines. + type: string + instances: + description: Instances contains the status for each instance in the + pool + items: + description: DevMachinePoolBackendInstanceStatus contains status + information about a DevMachinePool instances. + properties: + docker: + description: docker define backend status for a DevMachine for + a machine using docker containers. + properties: + addresses: + description: Addresses contains the associated addresses + for the docker machine. + items: + description: MachineAddress contains information for the + node's address. + properties: + address: + description: address is the machine address. + maxLength: 256 + minLength: 1 + type: string + type: + description: type is the machine address type, one + of Hostname, ExternalIP, InternalIP, ExternalDNS + or InternalDNS. + enum: + - Hostname + - ExternalIP + - InternalIP + - ExternalDNS + - InternalDNS + type: string + required: + - address + - type + type: object + type: array + bootstrapped: + description: |- + Bootstrapped is true when the kubeadm bootstrapping has been run + against this machine + + Deprecated: This field will be removed in the next apiVersion. 
+ When removing also remove from staticcheck exclude-rules for SA1019 in golangci.yml + type: boolean + instanceName: + description: InstanceName is the identification of the Machine + Instance within the Machine Pool + type: string + providerID: + description: ProviderID is the provider identification of + the Machine Pool Instance + type: string + ready: + description: Ready denotes that the machine (docker container) + is ready + type: boolean + version: + description: Version defines the Kubernetes version for + the Machine Instance + type: string + type: object + type: object + type: array + observedGeneration: + description: The generation observed by the deployment controller. + format: int64 + type: integer + ready: + description: Ready denotes that the machine pool is ready + type: boolean + replicas: + description: Replicas is the most recently observed number of replicas. + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml new file mode 100644 index 000000000000..6449b36dc24d --- /dev/null +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml @@ -0,0 +1,147 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.20.0 + name: devmachinepooltemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: DevMachinePoolTemplate + listKind: DevMachinePoolTemplateList + plural: devmachinepooltemplates + singular: devmachinepooltemplate + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Time duration since creation of 
DevMachinePoolTemplate + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DevMachinePoolTemplate is the Schema for the devmachinepooltemplates + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DevMachinePoolTemplateSpec defines the desired state of DevMachinePoolTemplate. + properties: + template: + description: DevMachinePoolTemplateResource describes the data needed + to create a DevMachine from a template. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + minProperties: 1 + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. 
May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: DevMachinePoolSpec defines the desired state of DevMachinePool. + properties: + providerID: + description: ProviderID is the identification ID of the Machine + Pool + type: string + providerIDList: + description: ProviderIDList is the list of identification + IDs of machine instances managed by this Machine Pool + items: + type: string + type: array + template: + description: Template contains the details used to build a + replica machine within the Machine Pool + properties: + docker: + description: docker defines a backend for a DevMachine + using docker containers. + properties: + customImage: + description: |- + CustomImage allows customizing the container image that is used for + running the machine + type: string + extraMounts: + description: |- + ExtraMounts describes additional mount points for the node container + These may be used to bind a hostPath + items: + description: |- + Mount specifies a host volume to mount into a container. + This is a simplified version of kind v1alpha4.Mount types. + properties: + containerPath: + description: Path of the mount within the container. + type: string + hostPath: + description: |- + Path of the mount on the host. If the hostPath doesn't exist, then runtimes + should report error. If the hostpath is a symbolic link, runtimes should + follow the symlink and mount the real destination to container. + type: string + readOnly: + description: If set, the mount is read-only. + type: boolean + type: object + type: array + preLoadImages: + description: |- + PreLoadImages allows to pre-load images in a newly created machine. This can be used to + speed up tests by avoiding e.g. to download CNI images on all the containers. 
+ items: + type: string + type: array + type: object + type: object + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: true + subresources: {} diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml index d7e348b9eaf2..392fe2b8b254 100644 --- a/test/infrastructure/docker/config/crd/kustomization.yaml +++ b/test/infrastructure/docker/config/crd/kustomization.yaml @@ -21,6 +21,8 @@ resources: - bases/infrastructure.cluster.x-k8s.io_devclusters.yaml - bases/infrastructure.cluster.x-k8s.io_devclustertemplates.yaml - bases/infrastructure.cluster.x-k8s.io_devmachinetemplates.yaml +- bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml +- bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/test/infrastructure/docker/config/rbac/role.yaml b/test/infrastructure/docker/config/rbac/role.yaml index 4fa6890d4d0f..7aba9553e033 100644 --- a/test/infrastructure/docker/config/rbac/role.yaml +++ b/test/infrastructure/docker/config/rbac/role.yaml @@ -76,6 +76,7 @@ rules: - infrastructure.cluster.x-k8s.io resources: - devclusters + - devmachinepools - devmachines - dockerclusters - dockermachinepools @@ -94,6 +95,8 @@ rules: - devclusters/finalizers - devclusters/status - devclustertemplates + - devmachinepools/finalizers + - devmachinepools/status - devmachines/finalizers - devmachines/status - devmachinetemplates diff --git a/test/infrastructure/docker/controllers/alias.go b/test/infrastructure/docker/controllers/alias.go index 6e7d0fee93ce..6afb3d2dbcfd 100644 --- a/test/infrastructure/docker/controllers/alias.go +++ b/test/infrastructure/docker/controllers/alias.go @@ -171,3 +171,21 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } + 
+// DevMachinePoolReconciler reconciles a DevMachinePool object. +type DevMachinePoolReconciler struct { + Client client.Client + ContainerRuntime container.Runtime + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +// SetupWithManager will add watches for this controller. +func (r *DevMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + return (&dockercontrollers.DevMachinePoolReconciler{ + Client: r.Client, + ContainerRuntime: r.ContainerRuntime, + WatchFilterValue: r.WatchFilterValue, + }).SetupWithManager(ctx, mgr, options) +} diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go index d30868791bb3..699d69990e7c 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package docker implements docker backends for DevClusters and DevMachines. +// Package docker implements docker backends for DevClusters, DevMachines and DevMachinePools. package docker import ( diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go new file mode 100644 index 000000000000..136127479064 --- /dev/null +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go @@ -0,0 +1,617 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"sort"
+
+	"github.com/blang/semver/v4"
+	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/internal/util/ssa"
+	"sigs.k8s.io/cluster-api/test/infrastructure/container"
+	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2"
+	"sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker"
+	"sigs.k8s.io/cluster-api/test/infrastructure/kind"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
+	"sigs.k8s.io/cluster-api/util/labels/format"
+	"sigs.k8s.io/cluster-api/util/patch"
+)
+
+const (
+	// devMachinePoolLabel is the label used to identify the DevMachinePool.
+	devMachinePoolLabel = "dev.cluster.x-k8s.io/machine-pool"
+
+	devMachinePoolControllerName = "devmachinepool-controller"
+)
+
+// MachinePoolBackEndReconciler reconciles the docker backend of a DevMachinePool object.
+type MachinePoolBackEndReconciler struct {
+	client.Client
+	ContainerRuntime container.Runtime
+	SsaCache         ssa.Cache
+}
+
+// ReconcileNormal handles the docker backend for a DevMachinePool that is not yet deleted.
+func (r *MachinePoolBackEndReconciler) ReconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (ctrl.Result, error) { + if devMachinePool.Spec.Template.Docker == nil { + return ctrl.Result{}, errors.New("DockerMachinePoolBackEndReconciler can't be called for DevMachinePools without a Docker backend") + } + + log := ctrl.LoggerFrom(ctx) + + // Make sure bootstrap data is available and populated. + if machinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { + log.Info("Waiting for the Bootstrap provider controller to set bootstrap data") + return ctrl.Result{}, nil + } + + if machinePool.Spec.Replicas == nil { + machinePool.Spec.Replicas = ptr.To[int32](1) + } + + // First, reconcile the Docker containers, but do not delete any as we need to delete the Machine to ensure node cordon/drain. + // Similarly, providers implementing MachinePool Machines will need to reconcile their analogous infrastructure instances (aside + // from deletion) before reconciling InfraMachinePoolMachines. + if err := r.reconcileDockerContainers(ctx, cluster, machinePool, devMachinePool); err != nil { + return ctrl.Result{}, err + } + + // Second, once the Docker containers are created, reconcile the DevMachines. This function creates a DevMachine for each newly created Docker + // container, and handles container deletion. Instead of deleting an infrastructure instance directly, we want to delete the owner Machine. This will + // trigger a cordon and drain of the node, as well as trigger the deletion of the DevMachine, which in turn causes the Docker container to be deleted. + // Similarly, providers will need to create InfraMachines for each instance, and instead of deleting instances directly, delete the owner Machine. 
+ if err := r.reconcileDevMachines(ctx, cluster, machinePool, devMachinePool); err != nil { + return ctrl.Result{}, err + } + + // Fetch the list of DevMachines to ensure the provider IDs are up to date. + devMachineList, err := getDevMachines(ctx, r.Client, *cluster, *machinePool, *devMachinePool) + if err != nil { + return ctrl.Result{}, err + } + + // Derive providerIDList from the provider ID on each DevMachine if it exists. The providerID is set by the DevMachine controller. + devMachinePool.Spec.ProviderIDList = []string{} + for _, devMachine := range devMachineList.Items { + if devMachine.Spec.ProviderID != "" { + devMachinePool.Spec.ProviderIDList = append(devMachinePool.Spec.ProviderIDList, devMachine.Spec.ProviderID) + } + } + // Ensure the providerIDList is deterministic (getDevMachines doesn't guarantee a specific order) + sort.Strings(devMachinePool.Spec.ProviderIDList) + + devMachinePool.Status.Replicas = int32(len(devMachineList.Items)) + + if devMachinePool.Spec.ProviderID == "" { + // This is a fake provider ID which does not tie back to any docker infrastructure. In cloud providers, + // this ID would tie back to the resource which manages the machine pool implementation. For example, + // Azure uses a VirtualMachineScaleSet to manage a set of like machines. + devMachinePool.Spec.ProviderID = getDevMachinePoolProviderID(cluster.Name, devMachinePool.Name) + } + + if len(devMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas) && len(devMachineList.Items) == int(*machinePool.Spec.Replicas) { + devMachinePool.Status.Ready = true + conditions.Set(devMachinePool, metav1.Condition{ + Type: infrav1.ReplicasReadyCondition, + Status: metav1.ConditionTrue, + Reason: infrav1.ReplicasReadyReason, + }) + + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + +// ReconcileDelete handle docker backend for delete DevMachinePool. 
+func (r *MachinePoolBackEndReconciler) ReconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (ctrl.Result, error) { + if devMachinePool.Spec.Template.Docker == nil { + return ctrl.Result{}, errors.New("DockerMachinePoolBackEndReconciler can't be called for DevMachinePools without a Docker backend") + } + + log := ctrl.LoggerFrom(ctx) + + devMachineList, err := getDevMachines(ctx, r.Client, *cluster, *machinePool, *devMachinePool) + if err != nil { + return ctrl.Result{}, err + } + + if len(devMachineList.Items) > 0 { + log.Info("DevMachinePool still has dependent DevMachines, deleting them first and requeuing", "count", len(devMachineList.Items)) + + var errs []error + + for _, devMachine := range devMachineList.Items { + if !devMachine.GetDeletionTimestamp().IsZero() { + // Don't handle deleted child + continue + } + + if err := r.deleteMachinePoolMachine(ctx, devMachine); err != nil { + err = errors.Wrapf(err, "error deleting DevMachinePool %s/%s: failed to delete %s %s", devMachinePool.Namespace, devMachinePool.Name, devMachine.Namespace, devMachine.Name) + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return ctrl.Result{}, kerrors.NewAggregate(errs) + } + return ctrl.Result{}, nil + } + + // Once there are no DevMachines left, ensure there are no Docker containers left behind. + // This can occur if deletion began after containers were created but before the DevMachines were created, or if creation of a DevMachine failed. + log.Info("DevMachines have been deleted, deleting any remaining Docker containers") + + labelFilters := map[string]string{devMachinePoolLabel: devMachinePool.Name} + // List Docker containers, i.e. external machines in the cluster. 
+ externalMachines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to list all machines in the cluster with label \"%s:%s\"", devMachinePoolLabel, devMachinePool.Name) + } + + // Providers should similarly ensure that all infrastructure instances are deleted even if the InfraMachine has not been created yet. + for _, externalMachine := range externalMachines { + log.Info("Deleting Docker container", "container", externalMachine.Name()) + if err := externalMachine.Delete(ctx); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to delete machine %s", externalMachine.Name()) + } + } + + // Once all DockerMachines and Docker containers are deleted, remove the finalizer. + controllerutil.RemoveFinalizer(devMachinePool, infrav1.MachinePoolFinalizer) + + return ctrl.Result{}, nil +} + +// PatchDevMachinePool patch a DevMachinePool. +func (r *MachinePoolBackEndReconciler) PatchDevMachinePool(ctx context.Context, patchHelper *patch.Helper, devMachinePool *infrav1.DevMachinePool) error { + if devMachinePool.Spec.Template.Docker == nil { + return errors.New("DockerMachinePoolBackEndReconciler can't be called for DevMachinePools without a Docker backend") + } + + // Always update the readyCondition by summarizing the state of other conditions. + // A step counter is added to represent progress during the provisioning process (instead we are hiding it during the deletion process). + if err := conditions.SetSummaryCondition(devMachinePool, devMachinePool, infrav1.DevMachinePoolReadyCondition, + conditions.ForConditionTypes{ + infrav1.ReplicasReadyCondition, + }, + // Using a custom merge strategy to override reasons applied during merge. + conditions.CustomMergeStrategy{ + MergeStrategy: conditions.DefaultMergeStrategy( + // Use custom reasons. 
+ conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc( + infrav1.DevMachinePoolNotReadyReason, + infrav1.DevMachinePoolReadyUnknownReason, + infrav1.DevMachinePoolReadyReason, + )), + ), + }, + ); err != nil { + return errors.Wrapf(err, "failed to set %s condition", infrav1.DevMachinePoolReadyCondition) + } + + // Patch the object, ignoring conflicts on the conditions owned by this controller. + return patchHelper.Patch( + ctx, + devMachinePool, + patch.WithOwnedConditions{Conditions: []string{ + clusterv1.PausedCondition, + infrav1.DevMachinePoolReadyCondition, + infrav1.ReplicasReadyCondition, + }}, + ) +} + +// reconcileDockerContainers manages the Docker containers for a MachinePool such that it +// - Ensures the number of up-to-date Docker containers is equal to the MachinePool's desired replica count. +// - Does not delete any containers as that must be triggered in reconcileDockerMachines to ensure node cordon/drain. +// +// Providers should similarly create their infrastructure instances and reconcile any additional logic. 
+func (r *MachinePoolBackEndReconciler) reconcileDockerContainers(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) error { + log := ctrl.LoggerFrom(ctx) + + log.V(2).Info("Reconciling Docker containers", "DevMachinePool", klog.KObj(devMachinePool)) + + labelFilters := map[string]string{devMachinePoolLabel: devMachinePool.Name} + + machines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters) + if err != nil { + return errors.Wrapf(err, "failed to list all machines in the cluster") + } + + matchingMachineCount := len(machinesMatchingInfrastructureSpec(ctx, machines, machinePool, devMachinePool)) + numToCreate := int(*machinePool.Spec.Replicas) - matchingMachineCount + for range numToCreate { + log.V(2).Info("Creating a new Docker container for machinePool", "MachinePool", klog.KObj(machinePool)) + name := fmt.Sprintf("worker-%s", util.RandomString(6)) + if err := createDockerContainer(ctx, name, cluster, machinePool, devMachinePool); err != nil { + return errors.Wrap(err, "failed to create a new docker machine") + } + } + + return nil +} + +// reconcileDevMachines creates and deletes DevMachines to match the MachinePool's desired number of replicas and infrastructure spec. +// It is responsible for +// - Ensuring each Docker container has an associated DevMachine by creating one if it doesn't already exist. +// - Ensuring that deletion for Docker container happens by calling delete on the associated Machine so that the node is cordoned/drained and the infrastructure is cleaned up. +// - Deleting DevMachines referencing a container whose Kubernetes version or custom image no longer matches the spec. +// - Deleting DevMachines that correspond to a deleted/non-existent Docker container. +// - Deleting DevMachines when scaling down such that DevMachines whose owner Machine has the clusterv1.DeleteMachineAnnotation is given priority. 
+func (r *MachinePoolBackEndReconciler) reconcileDevMachines(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) error { + log := ctrl.LoggerFrom(ctx) + + log.V(2).Info("Reconciling DevMachines", "DevMachinePool", klog.KObj(devMachinePool)) + + devMachineList, err := getDevMachines(ctx, r.Client, *cluster, *machinePool, *devMachinePool) + if err != nil { + return err + } + + devMachineMap := make(map[string]infrav1.DevMachine) + for _, devMachine := range devMachineList.Items { + devMachineMap[devMachine.Name] = devMachine + } + + // List the Docker containers. This corresponds to a InfraMachinePool instance for providers. + labelFilters := map[string]string{devMachinePoolLabel: devMachinePool.Name} + externalMachines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters) + if err != nil { + return errors.Wrapf(err, "failed to list all machines in the cluster") + } + + externalMachineMap := make(map[string]*docker.Machine) + for _, externalMachine := range externalMachines { + externalMachineMap[externalMachine.Name()] = externalMachine + } + + // Step 1: + // Create a DevMachine for each Docker container so we surface the information to the user. Use the same name as the Docker container for the Dev Machine for ease of lookup. + // Providers should iterate through their infrastructure instances and ensure that each instance has a corresponding InfraMachine. 
+ for _, machine := range externalMachines { + if existingMachine, ok := devMachineMap[machine.Name()]; ok { + log.V(2).Info("Patching existing DevMachine", "DevMachine", klog.KObj(&existingMachine)) + desiredMachine := computeDesiredDevMachine(machine.Name(), cluster, machinePool, devMachinePool, &existingMachine) + if err := ssa.Patch(ctx, r.Client, devMachinePoolControllerName, desiredMachine, ssa.WithCachingProxy{Cache: r.SsaCache, Original: &existingMachine}); err != nil { + return errors.Wrapf(err, "failed to update DockerMachine %q", klog.KObj(desiredMachine)) + } + + devMachineMap[desiredMachine.Name] = *desiredMachine + } else { + log.V(2).Info("Creating a new DevMachine for Docker container", "container", machine.Name()) + desiredMachine := computeDesiredDevMachine(machine.Name(), cluster, machinePool, devMachinePool, nil) + if err := ssa.Patch(ctx, r.Client, devMachinePoolControllerName, desiredMachine); err != nil { + return errors.Wrap(err, "failed to create a new dev machine") + } + + devMachineMap[desiredMachine.Name] = *desiredMachine + } + } + + // Step 2: + // Delete any DevMachine that correspond to a deleted Docker container. + // Providers should iterate through the InfraMachines to ensure each one still corresponds to an existing infrastructure instance. + // This allows the InfraMachine (and owner Machine) to be deleted and avoid hanging resources when a user deletes an instance out-of-band. + for _, devMachine := range devMachineMap { + if _, ok := externalMachineMap[devMachine.Name]; !ok { + devMachine := devMachine + log.V(2).Info("Deleting DevMachine with no underlying infrastructure", "DevMachine", klog.KObj(&devMachine)) + if err := r.deleteMachinePoolMachine(ctx, devMachine); err != nil { + return err + } + + delete(devMachineMap, devMachine.Name) + } + } + + // Step 3: + // This handles the scale down/excess replicas case and the case where a rolling upgrade is needed. 
+ // If there are more ready DevMachines than desired replicas, start to delete the excess DevMachines such that + // - DevMachines with an outdated Kubernetes version or custom image are deleted first (i.e. the rolling upgrade). + // - DevMachines whose owner Machine contains the clusterv1.DeleteMachineAnnotation are deleted next (to support cluster autoscaler). + // Note: we want to ensure that there are always enough ready DevMachines before deleting anything or scaling down. + + // For each DevMachine, fetch the owner Machine and copy the clusterv1.DeleteMachineAnnotation to the DevMachine if it exists before sorting the DevMachines. + // This is done just before sorting to guarantee we have the latest copy of the Machine annotations. + devMachinesWithAnnotation, err := r.propagateMachineDeleteAnnotation(ctx, devMachineMap) + if err != nil { + return err + } + + // Sort DockerMachines with the clusterv1.DeleteMachineAnnotation to the front of each list. + // If providers already have a sorting order for instance deletion, i.e. oldest first or newest first, the clusterv1.DeleteMachineAnnotation must take priority. + // For example, if deleting by oldest, we expect the InfraMachines with clusterv1.DeleteMachineAnnotation to be deleted first followed by the oldest, and the second oldest, etc. + orderedDevMachines := orderByDeleteMachineAnnotation(devMachinesWithAnnotation) + + // Note: this includes DockerMachines that are out of date but still ready. This is to ensure we always have enough ready DockerMachines before deleting anything. 
+ totalReadyMachines := 0 + for i := range orderedDevMachines { + devMachine := orderedDevMachines[i] + // TODO (v1beta2): test for v1beta2 conditions + if ptr.Deref(devMachine.Status.Initialization.Provisioned, false) || v1beta1conditions.IsTrue(&devMachine, clusterv1.ReadyV1Beta1Condition) { + totalReadyMachines++ + } + } + + outdatedMachines, readyMachines, err := r.getDeletionCandidates(ctx, orderedDevMachines, externalMachineMap, machinePool, devMachinePool) + if err != nil { + return err + } + + desiredReplicas := int(*machinePool.Spec.Replicas) + overProvisionCount := totalReadyMachines - desiredReplicas + + // Loop through outdated DevMachines first and decrement the overProvisionCount until it reaches 0. + for _, devMachine := range outdatedMachines { + if overProvisionCount > 0 { + devMachine := devMachine + log.V(2).Info("Deleting DevMachine because it is outdated", "DevMachine", klog.KObj(&devMachine)) + if err := r.deleteMachinePoolMachine(ctx, devMachine); err != nil { + return err + } + + overProvisionCount-- + } + } + + // Then, loop through the ready DevMachines first and decrement the overProvisionCount until it reaches 0. + for _, devMachine := range readyMachines { + if overProvisionCount > 0 { + devMachine := devMachine + log.V(2).Info("Deleting Devmachine because it is an excess replica", "DevMachine", klog.KObj(&devMachine)) + if err := r.deleteMachinePoolMachine(ctx, devMachine); err != nil { + return err + } + + overProvisionCount-- + } + } + + return nil +} + +// deleteMachinePoolMachine attempts to delete a DevMachine and its associated owner Machine if it exists. 
+func (r *MachinePoolBackEndReconciler) deleteMachinePoolMachine(ctx context.Context, devMachine infrav1.DevMachine) error { + log := ctrl.LoggerFrom(ctx) + + machine, err := util.GetOwnerMachine(ctx, r.Client, devMachine.ObjectMeta) + if err != nil { + return errors.Wrapf(err, "error getting owner Machine for DevMachine %s/%s", devMachine.Namespace, devMachine.Name) + } + // util.GetOwnerMachine() returns a nil Machine without error if there is no Machine kind in the ownerRefs, so we must verify that machine is not nil. + if machine == nil { + log.V(2).Info("No owner Machine exists for DevMachine", "devMachine", klog.KObj(&devMachine)) + + // If the DevMachine does not have an owner Machine, do not attempt to delete the DevMachine as the MachinePool controller will create the + // Machine and we want to let it catch up. If we are too hasty to delete, that introduces a race condition where the DevMachine could be deleted + // just as the Machine comes online. + + // In the case where the MachinePool is being deleted and the Machine will never come online, the DevMachine will be deleted via its ownerRef to the + // DevMachinePool, so that is covered as well. + + return nil + } + + log.Info("Deleting Machine for DevMachine", "Machine", klog.KObj(machine), "DevMachine", klog.KObj(&devMachine)) + + if err := r.Client.Delete(ctx, machine); err != nil { + return errors.Wrapf(err, "failed to delete Machine %s/%s", machine.Namespace, machine.Name) + } + + return nil +} + +// propagateMachineDeleteAnnotation returns the DevMachines for a MachinePool and for each DevMachine, it copies the owner +// Machine's delete annotation to each DevMachine if it's present. This is done just in time to ensure that the annotations are +// up to date when we sort for DevMachine deletion. 
+func (r *MachinePoolBackEndReconciler) propagateMachineDeleteAnnotation(ctx context.Context, devMachineMap map[string]infrav1.DevMachine) ([]infrav1.DevMachine, error) { + _ = ctrl.LoggerFrom(ctx) + + devMachines := []infrav1.DevMachine{} + for _, devMachine := range devMachineMap { + machine, err := util.GetOwnerMachine(ctx, r.Client, devMachine.ObjectMeta) + if err != nil { + return nil, errors.Wrapf(err, "error getting owner Machine for DockerMachine %s/%s", devMachine.Namespace, devMachine.Name) + } + if machine != nil && machine.Annotations != nil { + if devMachine.Annotations == nil { + devMachine.Annotations = map[string]string{} + } + if _, hasDeleteAnnotation := machine.Annotations[clusterv1.DeleteMachineAnnotation]; hasDeleteAnnotation { + devMachine.Annotations[clusterv1.DeleteMachineAnnotation] = machine.Annotations[clusterv1.DeleteMachineAnnotation] + } + } + + devMachines = append(devMachines, devMachine) + } + + return devMachines, nil +} + +// getDeletionCandidates returns the DevMachines for a MachinePool that do not match the infrastructure spec followed by any DevMachines that are ready and up to date, i.e. matching the infrastructure spec. +func (r *MachinePoolBackEndReconciler) getDeletionCandidates(ctx context.Context, devMachines []infrav1.DevMachine, externalMachineSet map[string]*docker.Machine, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (outdatedMachines []infrav1.DevMachine, readyMatchingMachines []infrav1.DevMachine, err error) { + for i := range devMachines { + devMachine := devMachines[i] + externalMachine, ok := externalMachineSet[devMachine.Name] + if !ok { + // Note: Since we deleted any DevMachines that do not have an associated Docker container earlier, we should never hit this case. 
+			return nil, nil, errors.Errorf("failed to find externalMachine for DevMachine %s/%s", devMachine.Namespace, devMachine.Name)
+		}
+
+		// TODO (v1beta2): test for v1beta2 conditions
+		if !isMachineMatchingInfrastructureSpec(ctx, externalMachine, machinePool, devMachinePool) {
+			outdatedMachines = append(outdatedMachines, devMachine)
+		} else if ptr.Deref(devMachine.Status.Initialization.Provisioned, false) || v1beta1conditions.IsTrue(&devMachine, clusterv1.ReadyV1Beta1Condition) {
+			readyMatchingMachines = append(readyMatchingMachines, devMachine)
+		}
+	}
+
+	return outdatedMachines, readyMatchingMachines, nil
+}
+
+// machinesMatchingInfrastructureSpec returns the Docker containers matching the custom image in the DevMachinePool spec.
+func machinesMatchingInfrastructureSpec(ctx context.Context, machines []*docker.Machine, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) []*docker.Machine {
+	var matchingMachines []*docker.Machine
+	for _, machine := range machines {
+		if isMachineMatchingInfrastructureSpec(ctx, machine, machinePool, devMachinePool) {
+			matchingMachines = append(matchingMachines, machine)
+		}
+	}
+
+	return matchingMachines
+}
+
+// isMachineMatchingInfrastructureSpec returns true if the Docker container image matches the custom image in the DevMachinePool spec.
+func isMachineMatchingInfrastructureSpec(_ context.Context, machine *docker.Machine, machinePool *clusterv1.MachinePool, dockerMachinePool *infrav1.DevMachinePool) bool {
+	// NOTE: With the current implementation we are checking if the machine is using a kindest/node image for the expected version,
+	// but not checking if the machine has the expected extra.mounts or pre.loaded images.
+ + semVer, err := semver.ParseTolerant(machinePool.Spec.Template.Spec.Version) + if err != nil { + // TODO: consider if to return an error + panic(errors.Wrap(err, "failed to parse DockerMachine version").Error()) + } + + kindMapping := kind.GetMapping(semVer, dockerMachinePool.Spec.Template.Docker.CustomImage) + + return machine.ContainerImage() == kindMapping.Image +} + +// createDockerContainer creates a Docker container to serve as a replica for the MachinePool. +func createDockerContainer(ctx context.Context, name string, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) error { + log := ctrl.LoggerFrom(ctx) + labelFilters := map[string]string{devMachinePoolLabel: devMachinePool.Name} + externalMachine, err := docker.NewMachine(ctx, cluster, name, labelFilters) + if err != nil { + return errors.Wrapf(err, "failed to create helper for managing the externalMachine named %s", name) + } + + // NOTE: FailureDomains don't mean much in CAPD since it's all local, but we are setting a label on + // each container, so we can check placement. + labels := map[string]string{} + for k, v := range labelFilters { + labels[k] = v + } + + if len(machinePool.Spec.FailureDomains) > 0 { + // For MachinePools placement is expected to be managed by the underlying infrastructure primitive, but + // given that there is no such an thing in CAPD, we are picking a random failure domain. + randomIndex := rand.Intn(len(machinePool.Spec.FailureDomains)) //nolint:gosec + for k, v := range docker.FailureDomainLabel(machinePool.Spec.FailureDomains[randomIndex]) { + labels[k] = v + } + } + + // If re-entering the reconcile loop and reaching this point, the container is expected to be running. If it is not, delete it so we can try to create it again. + if externalMachine.Exists() && !externalMachine.IsRunning() { + // This deletes the machine and results in re-creating it below. 
+ if err := externalMachine.Delete(ctx); err != nil { + return errors.Wrap(err, "Failed to delete not running DockerMachine") + } + } + + log.Info("Creating container for machinePool", "name", name, "MachinePool", klog.KObj(machinePool), "machinePool.Spec.Template.Spec.Version", machinePool.Spec.Template.Spec.Version) + if err := externalMachine.Create(ctx, devMachinePool.Spec.Template.Docker.CustomImage, constants.WorkerNodeRoleValue, machinePool.Spec.Template.Spec.Version, labels, devMachinePool.Spec.Template.Docker.ExtraMounts); err != nil { + return errors.Wrapf(err, "failed to create docker machine with name %s", name) + } + return nil +} + +func getDevMachines(ctx context.Context, c client.Client, cluster clusterv1.Cluster, machinePool clusterv1.MachinePool, devMachinePool infrav1.DevMachinePool) (*infrav1.DevMachineList, error) { + devMachineList := &infrav1.DevMachineList{} + labels := map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachinePoolNameLabel: machinePool.Name, + } + if err := c.List(ctx, devMachineList, client.InNamespace(devMachinePool.Namespace), client.MatchingLabels(labels)); err != nil { + return nil, err + } + + return devMachineList, nil +} + +// computeDesiredDevMachine creates a Devmachine to represent a Docker container in a DevMachinePool. +// These DevMachines have the clusterv1.ClusterNameLabel and clusterv1.MachinePoolNameLabel to support MachinePool Machines. 
+func computeDesiredDevMachine(name string, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool, existingDevMachine *infrav1.DevMachine) *infrav1.DevMachine {
+	devMachine := &infrav1.DevMachine{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:   devMachinePool.Namespace,
+			Name:        name,
+			Labels:      make(map[string]string),
+			Annotations: make(map[string]string),
+		},
+		Spec: infrav1.DevMachineSpec{
+			Backend: infrav1.DevMachineBackendSpec{
+				Docker: &infrav1.DockerMachineBackendSpec{
+					CustomImage:   devMachinePool.Spec.Template.Docker.CustomImage,
+					PreLoadImages: devMachinePool.Spec.Template.Docker.PreLoadImages,
+					ExtraMounts:   devMachinePool.Spec.Template.Docker.ExtraMounts,
+				},
+			},
+		},
+	}
+
+	if existingDevMachine != nil {
+		devMachine.SetUID(existingDevMachine.UID)
+		devMachine.SetOwnerReferences(existingDevMachine.OwnerReferences)
+	}
+
+	// Note: Since the MachinePool controller has not created its owner Machine yet, we want to set the DevMachinePool as the owner so it's not orphaned.
+	devMachine.SetOwnerReferences(util.EnsureOwnerRef(devMachine.OwnerReferences, metav1.OwnerReference{
+		APIVersion: infrav1.GroupVersion.String(),
+		Kind:       "DevMachinePool",
+		Name:       devMachinePool.Name,
+		UID:        devMachinePool.UID,
+	}))
+	devMachine.Labels[clusterv1.ClusterNameLabel] = cluster.Name
+	devMachine.Labels[clusterv1.MachinePoolNameLabel] = format.MustFormatValue(machinePool.Name)
+
+	return devMachine
+}
+
+// orderByDeleteMachineAnnotation will sort DevMachines with the clusterv1.DeleteMachineAnnotation to the front of the list.
+// It will preserve the existing order of the list otherwise so that it respects the existing delete priority.
+func orderByDeleteMachineAnnotation(machines []infrav1.DevMachine) []infrav1.DevMachine { + sort.SliceStable(machines, func(i, _ int) bool { + _, iHasAnnotation := machines[i].Annotations[clusterv1.DeleteMachineAnnotation] + + return iHasAnnotation + }) + + return machines +} + +func getDevMachinePoolProviderID(clusterName, devMachinePoolName string) string { + return fmt.Sprintf("dev:////%s-dmp-%s", clusterName, devMachinePoolName) +} diff --git a/test/infrastructure/docker/internal/controllers/backends/machinepool.go b/test/infrastructure/docker/internal/controllers/backends/machinepool.go new file mode 100644 index 000000000000..c2b46fa35f64 --- /dev/null +++ b/test/infrastructure/docker/internal/controllers/backends/machinepool.go @@ -0,0 +1,34 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backends + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" + "sigs.k8s.io/cluster-api/util/patch" +) + +// DevMachinePoolBackendReconciler defines reconciler behaviour for a DevMachinePool backend. 
+type DevMachinePoolBackendReconciler interface { + ReconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (ctrl.Result, error) + ReconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (ctrl.Result, error) + PatchDevMachinePool(ctx context.Context, patchHelper *patch.Helper, devMachinePool *infrav1.DevMachinePool) error +} diff --git a/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go b/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go new file mode 100644 index 000000000000..4220df517e5b --- /dev/null +++ b/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go @@ -0,0 +1,249 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controllers/external" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" + "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/test/infrastructure/container" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" + "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends" + dockerbackend "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends/docker" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" +) + +const ( + devMachinePoolControllerName = "devmachinepool-controller" +) + +// DevMachinePoolReconciler reconciles a DevMachinePool object. +type DevMachinePoolReconciler struct { + Client client.Client + ContainerRuntime container.Runtime + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string + + recorder record.EventRecorder + externalTracker external.ObjectTracker +} + +// SetupWithManager will add watches for this controller. 
+func (r *DevMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + if r.Client == nil || r.ContainerRuntime == nil { + return errors.New("Client and ContainerRuntime must not be nil") + } + + predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "devmachinepool") + clusterToDevMachinePools, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1.DevMachinePoolList{}, mgr.GetScheme()) + if err != nil { + return err + } + + c, err := capicontrollerutil.NewControllerManagedBy(mgr, predicateLog). + For(&infrav1.DevMachinePool{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)). + Watches( + &clusterv1.MachinePool{}, + handler.EnqueueRequestsFromMapFunc(util.MachinePoolToInfrastructureMapFunc(ctx, + infrav1.GroupVersion.WithKind("DevMachinePool"))), + ). + Watches( + &infrav1.DevMachine{}, + handler.EnqueueRequestsFromMapFunc(devMachineToDevMachinePool), + ). 
+		Watches(
+			&clusterv1.Cluster{},
+			handler.EnqueueRequestsFromMapFunc(clusterToDevMachinePools),
+			predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog),
+		).Build(r)
+	if err != nil {
+		return errors.Wrap(err, "failed setting up with a controller manager")
+	}
+
+	r.recorder = mgr.GetEventRecorderFor(devMachinePoolControllerName)
+	r.externalTracker = external.ObjectTracker{
+		Controller:      c,
+		Cache:           mgr.GetCache(),
+		Scheme:          mgr.GetScheme(),
+		PredicateLogger: &predicateLog,
+	}
+
+	return nil
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=devmachinepools,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=devmachinepools/status;devmachinepools/finalizers,verbs=get;list;watch;patch;update
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch;delete
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
+
+func (r *DevMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// Make the container runtime available to the backend reconciler via the context.
+	ctx = container.RuntimeInto(ctx, r.ContainerRuntime)
+
+	// Fetch the DevMachinePool instance.
+	devMachinePool := &infrav1.DevMachinePool{}
+	if err := r.Client.Get(ctx, req.NamespacedName, devMachinePool); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the MachinePool.
+	machinePool, err := util.GetOwnerMachinePool(ctx, r.Client, devMachinePool.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if machinePool == nil {
+		// Note: If ownerRef was not set, there is nothing to delete. Remove finalizer so deletion can succeed.
+		if !devMachinePool.DeletionTimestamp.IsZero() {
+			if controllerutil.ContainsFinalizer(devMachinePool, infrav1.MachinePoolFinalizer) {
+				devMachinePoolWithoutFinalizer := devMachinePool.DeepCopy()
+				controllerutil.RemoveFinalizer(devMachinePoolWithoutFinalizer, infrav1.MachinePoolFinalizer)
+				if err := r.Client.Patch(ctx, devMachinePoolWithoutFinalizer, client.MergeFrom(devMachinePool)); err != nil {
+					return ctrl.Result{}, errors.Wrapf(err, "failed to patch DevMachinePool %s", klog.KObj(devMachinePool))
+				}
+			}
+			return ctrl.Result{}, nil
+		}
+
+		log.Info("Waiting for MachinePool Controller to set OwnerRef on DevMachinePool")
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("MachinePool", machinePool.Name)
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Fetch the Cluster.
+	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
+	if err != nil {
+		log.Info("DevMachinePool owner MachinePool is missing cluster label or cluster does not exist")
+		return ctrl.Result{}, err
+	}
+
+	if cluster == nil {
+		log.Info(fmt.Sprintf("Please associate this machine pool with a cluster using the label %s: ", clusterv1.ClusterNameLabel))
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("Cluster", klog.KObj(cluster))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Initialize the patch helper
+	patchHelper, err := patch.NewHelper(devMachinePool, r.Client)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	backendReconciler := r.backendReconcilerFactory()
+
+	// Always attempt to Patch the DevMachinePool object and status after each reconciliation.
+	defer func() {
+		if err := backendReconciler.PatchDevMachinePool(ctx, patchHelper, devMachinePool); err != nil {
+			rerr = kerrors.NewAggregate([]error{rerr, err})
+		}
+	}()
+
+	// Handle deleted machine pools
+	if !devMachinePool.DeletionTimestamp.IsZero() {
+		return backendReconciler.ReconcileDelete(ctx, cluster, machinePool, devMachinePool)
+	}
+
+	// Add finalizer and the InfrastructureMachineKind if they aren't already present, and requeue if either were added.
+	// We want to add the finalizer here to avoid the race condition between init and delete.
+	// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
+	needsPatch := controllerutil.AddFinalizer(devMachinePool, infrav1.MachinePoolFinalizer)
+	needsPatch = setInfrastructureMachineKindForMachinePool(devMachinePool) || needsPatch
+	if needsPatch {
+		return ctrl.Result{}, nil
+	}
+
+	// Handle non-deleted machine pools
+	return backendReconciler.ReconcileNormal(ctx, cluster, machinePool, devMachinePool)
+}
+
+func (r *DevMachinePoolReconciler) backendReconcilerFactory() backends.DevMachinePoolBackendReconciler {
+	return &dockerbackend.MachinePoolBackEndReconciler{
+		Client:           r.Client,
+		ContainerRuntime: r.ContainerRuntime,
+		SsaCache:         ssa.NewCache("devmachinepool"),
+	}
+}
+
+// devMachineToDevMachinePool creates a mapping handler to transform DevMachine to DevMachinePool.
+func devMachineToDevMachinePool(_ context.Context, o client.Object) []ctrl.Request { + devMachine, ok := o.(*infrav1.DevMachine) + if !ok { + panic(fmt.Sprintf("Expected a DevMachine but got a %T", o)) + } + + for _, ownerRef := range devMachine.GetOwnerReferences() { + gv, err := schema.ParseGroupVersion(ownerRef.APIVersion) + if err != nil { + return nil + } + if ownerRef.Kind == "DevMachinePool" && gv.Group == infrav1.GroupVersion.Group { + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: ownerRef.Name, + Namespace: devMachine.Namespace, + }, + }, + } + } + } + + return nil +} + +// setInfrastructureMachineKindForMachinePool sets the infrastructure machine kind in the status if it is not set already to support +// MachinePool Machines and returns a boolean indicating if the status was updated. +func setInfrastructureMachineKindForMachinePool(devMachinePool *infrav1.DevMachinePool) bool { + if devMachinePool != nil && devMachinePool.Status.InfrastructureMachineKind != "DevMachine" { + devMachinePool.Status.InfrastructureMachineKind = "DevMachine" + return true + } + + return false +} diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go index 39c113ae0b6c..22c1a48943f4 100644 --- a/test/infrastructure/docker/main.go +++ b/test/infrastructure/docker/main.go @@ -497,6 +497,17 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { setupLog.Error(err, "Unable to create controller", "controller", "DevMachineTemplate") os.Exit(1) } + + if feature.Gates.Enabled(feature.MachinePool) { + if err := (&controllers.DevMachinePoolReconciler{ + Client: mgr.GetClient(), + ContainerRuntime: runtimeClient, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: concurrency}); err != nil { + setupLog.Error(err, "Unable to create controller", "controller", "DevMachinePool") + os.Exit(1) + } + } } func setupWebhooks(mgr ctrl.Manager) { diff --git 
a/test/infrastructure/docker/templates/cluster-template-dev-mp.yaml b/test/infrastructure/docker/templates/cluster-template-dev-mp.yaml new file mode 100644 index 000000000000..2bda0d0808bd --- /dev/null +++ b/test/infrastructure/docker/templates/cluster-template-dev-mp.yaml @@ -0,0 +1,23 @@ +apiVersion: cluster.x-k8s.io/v1beta2 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + clusterNetwork: + services: + cidrBlocks: ${SERVICE_CIDR:=["10.128.0.0/12"]} + pods: + cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} + serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} + topology: + classRef: + name: dev-quick-start + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} + workers: + machinePools: + - class: default-dev-worker + name: mp-0 + replicas: ${WORKER_MACHINE_COUNT} diff --git a/test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml new file mode 100644 index 000000000000..287114e55153 --- /dev/null +++ b/test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml @@ -0,0 +1,146 @@ +apiVersion: cluster.x-k8s.io/v1beta2 +kind: ClusterClass +metadata: + name: dev-quick-start +spec: + controlPlane: + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: KubeadmControlPlaneTemplate + name: dev-quick-start-control-plane + machineInfrastructure: + templateRef: + kind: DevMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: dev-quick-start-control-plane + healthCheck: + checks: + unhealthyNodeConditions: + - type: Ready + status: Unknown + timeoutSeconds: 300 + - type: Ready + status: "False" + timeoutSeconds: 300 + unhealthyMachineConditions: + - type: "NodeReady" + status: Unknown + timeoutSeconds: 300 + - type: "NodeReady" + status: "False" + timeoutSeconds: 300 + infrastructure: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: 
DevClusterTemplate + name: dev-quick-start-cluster + workers: + machinePools: + - class: default-dev-worker + bootstrap: + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: KubeadmConfigTemplate + name: dev-quick-start-default-worker-bootstraptemplate + infrastructure: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: DevMachinePoolTemplate + name: dev-quick-start-default-dev-worker-machinepooltemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: DevClusterTemplate +metadata: + name: dev-quick-start-cluster +spec: + template: + spec: + backend: + docker: + failureDomains: + - name: fd1 + controlPlane: true + - name: fd2 + controlPlane: true + - name: fd3 + controlPlane: true + - name: fd4 + controlPlane: true + - name: fd5 + controlPlane: true + - name: fd6 + controlPlane: false + - name: fd7 + controlPlane: false + - name: fd8 + controlPlane: false +--- +kind: KubeadmControlPlaneTemplate +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: dev-quick-start-control-plane +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. + certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + initConfiguration: + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + joinConfiguration: + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+ kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: DevMachineTemplate +metadata: + name: dev-quick-start-control-plane +spec: + template: + spec: + backend: + docker: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: DockerMachineTemplate +metadata: + name: dev-quick-start-default-worker-machinetemplate +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 +kind: KubeadmConfigTemplate +metadata: + name: dev-quick-start-default-worker-bootstraptemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+ kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: DevMachinePoolTemplate +metadata: + name: dev-quick-start-default-dev-worker-machinepooltemplate +spec: + template: + spec: + template: + docker: {} From 50a357b5bae5c4456824495acab59dd2c2a8980c Mon Sep 17 00:00:00 2001 From: Karthik Bhat Date: Mon, 9 Mar 2026 17:41:33 +0530 Subject: [PATCH 2/2] Adderss reveiw comments --- .../api/v1beta2/devmachinepool_types.go | 41 +++-- .../api/v1beta2/zz_generated.deepcopy.go | 49 ++++-- ...ture.cluster.x-k8s.io_devmachinepools.yaml | 30 ++-- ...ster.x-k8s.io_devmachinepooltemplates.yaml | 26 ++-- .../docker/config/crd/kustomization.yaml | 3 +- .../patches/webhook_in_devmachinepools.yaml | 16 ++ .../webhook_in_devmachinepooltemplates.yaml | 16 ++ .../docker/dockermachinepool_backend.go | 16 +- .../controllers/devmachinepool_controller.go | 3 +- .../internal/webhooks/dockermachinepool.go | 31 ---- test/infrastructure/docker/main.go | 7 - .../templates/cluster-template-dev-mp.yaml | 23 --- .../clusterclass-dev-quick-start.yaml | 146 ------------------ test/infrastructure/docker/webhooks/alias.go | 8 - 14 files changed, 135 insertions(+), 280 deletions(-) create mode 100644 test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepools.yaml create mode 100644 test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepooltemplates.yaml delete mode 100644 test/infrastructure/docker/internal/webhooks/dockermachinepool.go delete mode 100644 test/infrastructure/docker/templates/cluster-template-dev-mp.yaml delete mode 100644 test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml diff --git a/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go b/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go index 
2f4f9df4bf30..8457f86b02b5 100644
--- a/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go
+++ b/test/infrastructure/docker/api/v1beta2/devmachinepool_types.go
@@ -22,11 +22,6 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
-const (
-	// DevMachinePoolFinalizer allows ReconcileDevMachinePool to clean up resources.
-	DevMachinePoolFinalizer = "devmachinepool.infrastructure.cluster.x-k8s.io"
-)
-
 const (
 	// ReplicasReadyCondition reports an aggregate of current status of the replicas controlled by the MachinePool.
 	ReplicasReadyCondition string = "ReplicasReady"
@@ -78,6 +73,24 @@ type DevMachinePoolList struct {
 	Items []DevMachinePool `json:"items"`
 }
 
+// DockerMachinePoolBackendSpec defines the docker backend for a DevMachinePool.
+type DockerMachinePoolBackendSpec struct {
+	// CustomImage allows customizing the container image that is used for
+	// running the machine
+	// +optional
+	CustomImage string `json:"customImage,omitempty"`
+
+	// PreLoadImages allows to pre-load images in a newly created machine. This can be used to
+	// speed up tests by avoiding e.g. to download CNI images on all the containers.
+	// +optional
+	PreLoadImages []string `json:"preLoadImages,omitempty"`
+
+	// ExtraMounts describes additional mount points for the node container
+	// These may be used to bind a hostPath
+	// +optional
+	ExtraMounts []Mount `json:"extraMounts,omitempty"`
+}
+
 // DevMachinePoolSpec defines the desired state of DevMachinePool.
type DevMachinePoolSpec struct { // ProviderID is the identification ID of the Machine Pool @@ -88,16 +101,16 @@ type DevMachinePoolSpec struct { // +optional ProviderIDList []string `json:"providerIDList,omitempty"` - // Template contains the details used to build a replica machine within the Machine Pool + // backend contains the details used to build a replica machine within the Machine Pool // +optional - Template DevMachinePoolBackendTemplate `json:"template"` + Backend DevMachinePoolBackendSpec `json:"backend"` } -// DevMachinePoolBackendTemplate defines backends for a DevMachinePool. -type DevMachinePoolBackendTemplate struct { - // docker defines a backend for a DevMachine using docker containers. +// DevMachinePoolBackendSpec defines backends for a DevMachinePool. +type DevMachinePoolBackendSpec struct { + // docker defines a backend for a DevMachinePool using docker containers. // +optional - Docker *DockerMachinePoolMachineTemplate `json:"docker,omitempty"` + Docker *DockerMachinePoolBackendSpec `json:"docker,omitempty"` } // DevMachinePoolStatus defines the observed state of DevMachinePool. @@ -128,11 +141,11 @@ type DevMachinePoolStatus struct { // Instances contains the status for each instance in the pool // +optional - Instances []DevMachinePoolBackendInstanceStatus `json:"instances,omitempty"` + Instances []DevMachinePoolInstanceStatus `json:"instances,omitempty"` } -// DevMachinePoolBackendInstanceStatus contains status information about a DevMachinePool instances. -type DevMachinePoolBackendInstanceStatus struct { +// DevMachinePoolInstanceStatus contains status information about a DevMachinePool instances. +type DevMachinePoolInstanceStatus struct { // docker define backend status for a DevMachine for a machine using docker containers. 
// +optional Docker *DockerMachinePoolInstanceStatus `json:"docker,omitempty"` diff --git a/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go b/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go index e6bb4e2cba3c..a529c7a94002 100644 --- a/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go +++ b/test/infrastructure/docker/api/v1beta2/zz_generated.deepcopy.go @@ -519,41 +519,41 @@ func (in *DevMachinePool) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DevMachinePoolBackendInstanceStatus) DeepCopyInto(out *DevMachinePoolBackendInstanceStatus) { +func (in *DevMachinePoolBackendSpec) DeepCopyInto(out *DevMachinePoolBackendSpec) { *out = *in if in.Docker != nil { in, out := &in.Docker, &out.Docker - *out = new(DockerMachinePoolInstanceStatus) + *out = new(DockerMachinePoolBackendSpec) (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolBackendInstanceStatus. -func (in *DevMachinePoolBackendInstanceStatus) DeepCopy() *DevMachinePoolBackendInstanceStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolBackendSpec. +func (in *DevMachinePoolBackendSpec) DeepCopy() *DevMachinePoolBackendSpec { if in == nil { return nil } - out := new(DevMachinePoolBackendInstanceStatus) + out := new(DevMachinePoolBackendSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DevMachinePoolBackendTemplate) DeepCopyInto(out *DevMachinePoolBackendTemplate) { +func (in *DevMachinePoolInstanceStatus) DeepCopyInto(out *DevMachinePoolInstanceStatus) { *out = *in if in.Docker != nil { in, out := &in.Docker, &out.Docker - *out = new(DockerMachinePoolMachineTemplate) + *out = new(DockerMachinePoolInstanceStatus) (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolBackendTemplate. -func (in *DevMachinePoolBackendTemplate) DeepCopy() *DevMachinePoolBackendTemplate { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolInstanceStatus. +func (in *DevMachinePoolInstanceStatus) DeepCopy() *DevMachinePoolInstanceStatus { if in == nil { return nil } - out := new(DevMachinePoolBackendTemplate) + out := new(DevMachinePoolInstanceStatus) in.DeepCopyInto(out) return out } @@ -598,7 +598,7 @@ func (in *DevMachinePoolSpec) DeepCopyInto(out *DevMachinePoolSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - in.Template.DeepCopyInto(&out.Template) + in.Backend.DeepCopyInto(&out.Backend) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevMachinePoolSpec. @@ -623,7 +623,7 @@ func (in *DevMachinePoolStatus) DeepCopyInto(out *DevMachinePoolStatus) { } if in.Instances != nil { in, out := &in.Instances, &out.Instances - *out = make([]DevMachinePoolBackendInstanceStatus, len(*in)) + *out = make([]DevMachinePoolInstanceStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1407,6 +1407,31 @@ func (in *DockerMachinePool) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerMachinePoolBackendSpec) DeepCopyInto(out *DockerMachinePoolBackendSpec) { + *out = *in + if in.PreLoadImages != nil { + in, out := &in.PreLoadImages, &out.PreLoadImages + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolBackendSpec. +func (in *DockerMachinePoolBackendSpec) DeepCopy() *DockerMachinePoolBackendSpec { + if in == nil { + return nil + } + out := new(DockerMachinePoolBackendSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DockerMachinePoolDeprecatedStatus) DeepCopyInto(out *DockerMachinePoolDeprecatedStatus) { *out = *in diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml index 5b6a91f11546..de3674eb811f 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepools.yaml @@ -46,22 +46,13 @@ spec: spec: description: DevMachinePoolSpec defines the desired state of DevMachinePool. 
properties: - providerID: - description: ProviderID is the identification ID of the Machine Pool - type: string - providerIDList: - description: ProviderIDList is the list of identification IDs of machine - instances managed by this Machine Pool - items: - type: string - type: array - template: - description: Template contains the details used to build a replica + backend: + description: backend contains the details used to build a replica machine within the Machine Pool properties: docker: - description: docker defines a backend for a DevMachine using docker - containers. + description: docker defines a backend for a DevMachinePool using + docker containers. properties: customImage: description: |- @@ -100,6 +91,15 @@ spec: type: array type: object type: object + providerID: + description: ProviderID is the identification ID of the Machine Pool + type: string + providerIDList: + description: ProviderIDList is the list of identification IDs of machine + instances managed by this Machine Pool + items: + type: string + type: array type: object status: description: DevMachinePoolStatus defines the observed state of DevMachinePool. @@ -175,8 +175,8 @@ spec: description: Instances contains the status for each instance in the pool items: - description: DevMachinePoolBackendInstanceStatus contains status - information about a DevMachinePool instances. + description: DevMachinePoolInstanceStatus contains status information + about a DevMachinePool instances. 
properties: docker: description: docker define backend status for a DevMachine for diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml index 6449b36dc24d..438fe425d29f 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_devmachinepooltemplates.yaml @@ -79,22 +79,12 @@ spec: spec: description: DevMachinePoolSpec defines the desired state of DevMachinePool. properties: - providerID: - description: ProviderID is the identification ID of the Machine - Pool - type: string - providerIDList: - description: ProviderIDList is the list of identification - IDs of machine instances managed by this Machine Pool - items: - type: string - type: array - template: - description: Template contains the details used to build a + backend: + description: backend contains the details used to build a replica machine within the Machine Pool properties: docker: - description: docker defines a backend for a DevMachine + description: docker defines a backend for a DevMachinePool using docker containers. 
properties: customImage: @@ -134,6 +124,16 @@ spec: type: array type: object type: object + providerID: + description: ProviderID is the identification ID of the Machine + Pool + type: string + providerIDList: + description: ProviderIDList is the list of identification + IDs of machine instances managed by this Machine Pool + items: + type: string + type: array type: object required: - spec diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml index 392fe2b8b254..4e46d090ef8f 100644 --- a/test/infrastructure/docker/config/crd/kustomization.yaml +++ b/test/infrastructure/docker/config/crd/kustomization.yaml @@ -28,7 +28,6 @@ resources: patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -- path: patches/webhook_in_dockermachinepools.yaml - path: patches/webhook_in_dockermachines.yaml - path: patches/webhook_in_dockermachinetemplates.yaml - path: patches/webhook_in_dockerclusters.yaml @@ -38,6 +37,8 @@ patches: - path: patches/webhook_in_devmachinetemplates.yaml - path: patches/webhook_in_devclusters.yaml - path: patches/webhook_in_devclustertemplates.yaml +- path: patches/webhook_in_devmachinepools.yaml +- path: patches/webhook_in_devmachinepooltemplates.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepools.yaml b/test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepools.yaml new file mode 100644 index 000000000000..58e1213d5184 --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepools.yaml @@ -0,0 +1,16 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: devmachinepools.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1"] + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert diff --git a/test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepooltemplates.yaml new file mode 100644 index 000000000000..cb9181f1f6df --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/webhook_in_devmachinepooltemplates.yaml @@ -0,0 +1,16 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: devmachinepooltemplates.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1"] + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go index 136127479064..41030e4421db 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachinepool_backend.go @@ -62,7 +62,7 @@ type MachinePoolBackEndReconciler struct { // ReconcileNormal handle docker backend for DevMachinePool not yet deleted. 
func (r *MachinePoolBackEndReconciler) ReconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (ctrl.Result, error) { - if devMachinePool.Spec.Template.Docker == nil { + if devMachinePool.Spec.Backend.Docker == nil { return ctrl.Result{}, errors.New("DockerMachinePoolBackEndReconciler can't be called for DevMachinePools without a Docker backend") } @@ -134,7 +134,7 @@ func (r *MachinePoolBackEndReconciler) ReconcileNormal(ctx context.Context, clus // ReconcileDelete handle docker backend for delete DevMachinePool. func (r *MachinePoolBackEndReconciler) ReconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1.MachinePool, devMachinePool *infrav1.DevMachinePool) (ctrl.Result, error) { - if devMachinePool.Spec.Template.Docker == nil { + if devMachinePool.Spec.Backend.Docker == nil { return ctrl.Result{}, errors.New("DockerMachinePoolBackEndReconciler can't be called for DevMachinePools without a Docker backend") } @@ -195,7 +195,7 @@ func (r *MachinePoolBackEndReconciler) ReconcileDelete(ctx context.Context, clus // PatchDevMachinePool patch a DevMachinePool. 
func (r *MachinePoolBackEndReconciler) PatchDevMachinePool(ctx context.Context, patchHelper *patch.Helper, devMachinePool *infrav1.DevMachinePool) error { - if devMachinePool.Spec.Template.Docker == nil { + if devMachinePool.Spec.Backend.Docker == nil { return errors.New("DockerMachinePoolBackEndReconciler can't be called for DevMachinePools without a Docker backend") } @@ -503,7 +503,7 @@ func isMachineMatchingInfrastructureSpec(_ context.Context, machine *docker.Mach panic(errors.Wrap(err, "failed to parse DockerMachine version").Error()) } - kindMapping := kind.GetMapping(semVer, dockerMachinePool.Spec.Template.Docker.CustomImage) + kindMapping := kind.GetMapping(semVer, dockerMachinePool.Spec.Backend.Docker.CustomImage) return machine.ContainerImage() == kindMapping.Image } @@ -542,7 +542,7 @@ func createDockerContainer(ctx context.Context, name string, cluster *clusterv1. } log.Info("Creating container for machinePool", "name", name, "MachinePool", klog.KObj(machinePool), "machinePool.Spec.Template.Spec.Version", machinePool.Spec.Template.Spec.Version) - if err := externalMachine.Create(ctx, devMachinePool.Spec.Template.Docker.CustomImage, constants.WorkerNodeRoleValue, machinePool.Spec.Template.Spec.Version, labels, devMachinePool.Spec.Template.Docker.ExtraMounts); err != nil { + if err := externalMachine.Create(ctx, devMachinePool.Spec.Backend.Docker.CustomImage, constants.WorkerNodeRoleValue, machinePool.Spec.Template.Spec.Version, labels, devMachinePool.Spec.Backend.Docker.ExtraMounts); err != nil { return errors.Wrapf(err, "failed to create docker machine with name %s", name) } return nil @@ -574,9 +574,9 @@ func computeDesiredDevMachine(name string, cluster *clusterv1.Cluster, machinePo Spec: infrav1.DevMachineSpec{ Backend: infrav1.DevMachineBackendSpec{ Docker: &infrav1.DockerMachineBackendSpec{ - CustomImage: devMachinePool.Spec.Template.Docker.CustomImage, - PreLoadImages: devMachinePool.Spec.Template.Docker.PreLoadImages, - ExtraMounts: 
devMachinePool.Spec.Template.Docker.ExtraMounts, + CustomImage: devMachinePool.Spec.Backend.Docker.CustomImage, + PreLoadImages: devMachinePool.Spec.Backend.Docker.PreLoadImages, + ExtraMounts: devMachinePool.Spec.Backend.Docker.ExtraMounts, }, }, }, diff --git a/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go b/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go index 4220df517e5b..0e5d8d85bf50 100644 --- a/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go +++ b/test/infrastructure/docker/internal/controllers/devmachinepool_controller.go @@ -35,13 +35,13 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" - capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/test/infrastructure/container" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends" dockerbackend "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/controllers/backends/docker" "sigs.k8s.io/cluster-api/util" + capicontrollerutil "sigs.k8s.io/cluster-api/util/controller" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -244,6 +244,5 @@ func setInfrastructureMachineKindForMachinePool(devMachinePool *infrav1.DevMachi devMachinePool.Status.InfrastructureMachineKind = "DevMachine" return true } - return false } diff --git a/test/infrastructure/docker/internal/webhooks/dockermachinepool.go b/test/infrastructure/docker/internal/webhooks/dockermachinepool.go deleted file mode 100644 index 65e58b762193..000000000000 --- a/test/infrastructure/docker/internal/webhooks/dockermachinepool.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhooks - -import ( - ctrl "sigs.k8s.io/controller-runtime" - - infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" -) - -// DockerMachinePool implements a validating and defaulting webhook for DockerMachinePool. -type DockerMachinePool struct{} - -func (c *DockerMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr, &infrav1.DockerMachinePool{}). - Complete() -} diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go index 22c1a48943f4..107e89ab2474 100644 --- a/test/infrastructure/docker/main.go +++ b/test/infrastructure/docker/main.go @@ -526,13 +526,6 @@ func setupWebhooks(mgr ctrl.Manager) { os.Exit(1) } - if feature.Gates.Enabled(feature.MachinePool) { - if err := (&infrawebhooks.DockerMachinePool{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "Unable to create webhook", "webhook", "DockerMachinePool") - os.Exit(1) - } - } - if err := (&infrawebhooks.DevMachine{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "Unable to create webhook", "webhook", "DevMachine") os.Exit(1) diff --git a/test/infrastructure/docker/templates/cluster-template-dev-mp.yaml b/test/infrastructure/docker/templates/cluster-template-dev-mp.yaml deleted file mode 100644 index 2bda0d0808bd..000000000000 --- a/test/infrastructure/docker/templates/cluster-template-dev-mp.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: 
cluster.x-k8s.io/v1beta2 -kind: Cluster -metadata: - name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" -spec: - clusterNetwork: - services: - cidrBlocks: ${SERVICE_CIDR:=["10.128.0.0/12"]} - pods: - cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} - serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} - topology: - classRef: - name: dev-quick-start - controlPlane: - replicas: ${CONTROL_PLANE_MACHINE_COUNT} - version: ${KUBERNETES_VERSION} - workers: - machinePools: - - class: default-dev-worker - name: mp-0 - replicas: ${WORKER_MACHINE_COUNT} diff --git a/test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml deleted file mode 100644 index 287114e55153..000000000000 --- a/test/infrastructure/docker/templates/clusterclass-dev-quick-start.yaml +++ /dev/null @@ -1,146 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta2 -kind: ClusterClass -metadata: - name: dev-quick-start -spec: - controlPlane: - templateRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - kind: KubeadmControlPlaneTemplate - name: dev-quick-start-control-plane - machineInfrastructure: - templateRef: - kind: DevMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - name: dev-quick-start-control-plane - healthCheck: - checks: - unhealthyNodeConditions: - - type: Ready - status: Unknown - timeoutSeconds: 300 - - type: Ready - status: "False" - timeoutSeconds: 300 - unhealthyMachineConditions: - - type: "NodeReady" - status: Unknown - timeoutSeconds: 300 - - type: "NodeReady" - status: "False" - timeoutSeconds: 300 - infrastructure: - templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: DevClusterTemplate - name: dev-quick-start-cluster - workers: - machinePools: - - class: default-dev-worker - bootstrap: - templateRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 - kind: KubeadmConfigTemplate - name: dev-quick-start-default-worker-bootstraptemplate - infrastructure: - 
templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: DevMachinePoolTemplate - name: dev-quick-start-default-dev-worker-machinepooltemplate ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: DevClusterTemplate -metadata: - name: dev-quick-start-cluster -spec: - template: - spec: - backend: - docker: - failureDomains: - - name: fd1 - controlPlane: true - - name: fd2 - controlPlane: true - - name: fd3 - controlPlane: true - - name: fd4 - controlPlane: true - - name: fd5 - controlPlane: true - - name: fd6 - controlPlane: false - - name: fd7 - controlPlane: false - - name: fd8 - controlPlane: false ---- -kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta2 -metadata: - name: dev-quick-start-control-plane -spec: - template: - spec: - kubeadmConfigSpec: - clusterConfiguration: - apiServer: - # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. - certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - joinConfiguration: - nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
- kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: DevMachineTemplate -metadata: - name: dev-quick-start-control-plane -spec: - template: - spec: - backend: - docker: - extraMounts: - - containerPath: "/var/run/docker.sock" - hostPath: "/var/run/docker.sock" ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: DockerMachineTemplate -metadata: - name: dev-quick-start-default-worker-machinetemplate -spec: - template: - spec: - extraMounts: - - containerPath: "/var/run/docker.sock" - hostPath: "/var/run/docker.sock" ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 -kind: KubeadmConfigTemplate -metadata: - name: dev-quick-start-default-worker-bootstraptemplate -spec: - template: - spec: - joinConfiguration: - nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
- kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work - - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: DevMachinePoolTemplate -metadata: - name: dev-quick-start-default-dev-worker-machinepooltemplate -spec: - template: - spec: - template: - docker: {} diff --git a/test/infrastructure/docker/webhooks/alias.go b/test/infrastructure/docker/webhooks/alias.go index 18efb70e70d0..5bd4ab5afdb4 100644 --- a/test/infrastructure/docker/webhooks/alias.go +++ b/test/infrastructure/docker/webhooks/alias.go @@ -77,11 +77,3 @@ type DevMachineTemplate struct{} func (webhook *DevMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { return (&webhooks.DevMachineTemplate{}).SetupWebhookWithManager(mgr) } - -// DockerMachinePool implements a validating and defaulting webhook for DockerMachinePool. -type DockerMachinePool struct{} - -// SetupWebhookWithManager sets up DockerMachinePool webhooks. -func (webhook *DockerMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error { - return (&webhooks.DockerMachinePool{}).SetupWebhookWithManager(mgr) -}