diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4d064669..a8f10f6f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,10 +16,13 @@ jobs: - uses: actions/setup-go@v6 with: go-version-file: go.mod + - name: build kube-api-linter + run: go -C hack/tools build -buildmode=plugin -o ${{ github.workspace }}/bin/kube-api-linter.so sigs.k8s.io/kube-api-linter/pkg/plugin - name: golangci-lint uses: golangci/golangci-lint-action@v8 with: - version: v2.1.6 + version: v2.6.0 + install-mode: goinstall args: '--timeout 10m' yamllint: diff --git a/.golangci.yml b/.golangci.yml index 692e3822..423afbd1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -18,6 +18,7 @@ linters: - govet - importas - ineffassign + - kubeapilinter - loggercheck - misspell - nakedret @@ -31,6 +32,11 @@ linters: - unused - whitespace settings: + custom: + kubeapilinter: + path: bin/kube-api-linter.so + description: Kube API LInter lints Kube like APIs based on API conventions and best practices. + original-url: sigs.k8s.io/kube-api-linter importas: alias: - pkg: k8s.io/api/(\w+)/(v[\w\d]+) @@ -84,13 +90,15 @@ linters: - linters: - staticcheck text: QF100(2|3|8) + - linters: + - staticcheck + text: ST1001 + - linters: + - revive + text: dot-imports - linters: - unparam text: always receives - - path: _test\.go - text: should not use dot imports - - path: (framework|e2e)/.*.go - text: should not use dot imports - path: _test\.go text: cyclomatic complexity - linters: @@ -108,6 +116,9 @@ linters: - linters: - revive text: 'var-naming: avoid meaningless package names' + - path-except: api/v1alpha2 + linters: + - kubeapilinter paths: - zz_generated.*\.go$ - .*conversion.*\.go$ @@ -117,7 +128,6 @@ linters: - examples$ issues: max-issues-per-linter: 0 - max-same-issues: 0 formatters: enable: - gofmt diff --git a/Makefile b/Makefile index f7e57c12..438e8580 100644 --- a/Makefile +++ b/Makefile @@ -58,6 +58,7 @@ vet: ## Run go vet against code. 
.PHONY: lint lint: ## Run lint. + go -C $(TOOLS_DIR) build -buildmode=plugin -o $(LOCALBIN)/kube-api-linter.so sigs.k8s.io/kube-api-linter/pkg/plugin go run -modfile ./hack/tools/go.mod github.com/golangci/golangci-lint/v2/cmd/golangci-lint run # Package names to test diff --git a/PROJECT b/PROJECT index c6db8c5e..31d19def 100644 --- a/PROJECT +++ b/PROJECT @@ -50,4 +50,33 @@ resources: kind: ProxmoxClusterTemplate path: github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: cluster.x-k8s.io + group: infrastructure + kind: ProxmoxMachine + path: github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2 + version: v1alpha2 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: infrastructure + kind: ProxmoxCluster + path: github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2 + version: v1alpha2 + webhooks: + defaulting: true + validation: true + webhookVersion: v2 +- api: + crdVersion: v1 + namespaced: true + domain: cluster.x-k8s.io + group: infrastructure + kind: ProxmoxMachineTemplate + path: github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2 + version: v1alpha2 version: "3" diff --git a/README.md b/README.md index a826f49f..f8c3f360 100644 --- a/README.md +++ b/README.md @@ -16,15 +16,27 @@ Check out the [quickstart guide](./docs/Usage.md#quick-start) for launching a cl ## Compatibility with Cluster API and Kubernetes Versions This provider's versions are compatible with the following versions of Cluster API: -| | Cluster API v1beta1 (v1.4) | Cluster API v1beta1 (v1.5) | Cluster API v1beta1 (v1.6) | Cluster API v1beta1 (v1.7) | Cluster API v1beta1 (v1.8) | Cluster API v1beta1 (v1.9) | -|------------------------|:--------------------------:|:--------------------------:|:--------------------------:|:--------------------------:|:--------------------------:|:--------------------------:| -| CAPMOX v1alpha1 
(v0.1) | ✓ | ✓ | ☓ | ☓ | ☓ | ☓ | -| CAPMOX v1alpha1 (v0.2) | ☓ | ✓ | ✓ | ☓ | ☓ | ☓ | -| CAPMOX v1alpha1 (v0.3) | ☓ | ✓ | ✓ | ✓ | ☓ | ☓ | -| CAPMOX v1alpha1 (v0.4) | ☓ | ✓ | ✓ | ✓ | ☓ | ☓ | -| CAPMOX v1alpha1 (v0.5) | ☓ | ☓ | ✓ | ✓ | ☓ | ☓ | -| CAPMOX v1alpha1 (v0.6) | ☓ | ☓ | ☓ | ✓ | ✓ | ☓ | -| CAPMOX v1alpha1 (v0.7) | ☓ | ☓ | ☓ | ☓ | ✓ | ✓ | +### CAPMOX v1alpha2 (current) + +Cluster API v1beta2 + +| | CAPI v1.11 | +|-------------|:----------:| +| CAPMOX v0.8 | ✓ | + +### CAPMOX v1alpha1 (obsolete) + +Cluster API v1beta1 + +| | CAPI v1.4 | CAPI v1.5 | CAPI v1.6 | CAPI v1.7 | CAPI v1.8 | CAPI v1.9 | CAPI v1.10 | +|-------------|:---------:|:---------:|:---------:|:---------:|:---------:|:---------:|:----------:| +| CAPMOX v0.1 | ✓ | ✓ | ☓ | ☓ | ☓ | ☓ | ☓ | +| CAPMOX v0.2 | ☓ | ✓ | ✓ | ☓ | ☓ | ☓ | ☓ | +| CAPMOX v0.3 | ☓ | ✓ | ✓ | ✓ | ☓ | ☓ | ☓ | +| CAPMOX v0.4 | ☓ | ✓ | ✓ | ✓ | ☓ | ☓ | ☓ | +| CAPMOX v0.5 | ☓ | ☓ | ✓ | ✓ | ☓ | ☓ | ☓ | +| CAPMOX v0.6 | ☓ | ☓ | ☓ | ✓ | ✓ | ☓ | ☓ | +| CAPMOX v0.7 | ☓ | ☓ | ☓ | ☓ | ✓ | ✓ | ✓ | (See [Kubernetes support matrix](https://cluster-api.sigs.k8s.io/reference/versions.html) of Cluster API versions). diff --git a/api/v1alpha1/proxmoxclustertemplate_types.go b/api/v1alpha1/proxmoxclustertemplate_types.go index 1412aa82..7359d096 100644 --- a/api/v1alpha1/proxmoxclustertemplate_types.go +++ b/api/v1alpha1/proxmoxclustertemplate_types.go @@ -38,7 +38,6 @@ type ProxmoxClusterTemplateResource struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:path=proxmoxclustertemplates,scope=Namespaced,categories=cluster-api,shortName=pct -// +kubebuilder:storageversion // ProxmoxClusterTemplate is the Schema for the proxmoxclustertemplates API. 
type ProxmoxClusterTemplate struct { diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 535395fe..8bcb581d 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -563,7 +563,6 @@ type MetadataSettings struct { } // +kubebuilder:object:root=true -// +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:resource:path=proxmoxmachines,scope=Namespaced,categories=cluster-api;proxmox,shortName=moxm // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this ProxmoxMachine belongs" diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index e6f14fb5..6c796f60 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -26,6 +26,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" ) func defaultMachine() *ProxmoxMachine { @@ -201,7 +203,7 @@ var _ = Describe("ProxmoxMachine Test", func() { IPPoolConfig: IPPoolConfig{ IPv4PoolRef: &corev1.TypedLocalObjectReference{ APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "InClusterIPPool", + Kind: InClusterIPPool, Name: "some-pool", }, }, diff --git a/api/v1alpha1/proxmoxmachinetemplate_types.go b/api/v1alpha1/proxmoxmachinetemplate_types.go index 23e9a5fd..0cb1a07a 100644 --- a/api/v1alpha1/proxmoxmachinetemplate_types.go +++ b/api/v1alpha1/proxmoxmachinetemplate_types.go @@ -28,7 +28,6 @@ type ProxmoxMachineTemplateSpec struct { // +kubebuilder:object:root=true // +kubebuilder:resource:path=proxmoxmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=pmt -// +kubebuilder:storageversion // ProxmoxMachineTemplate is the Schema for the proxmoxmachinetemplates API. 
type ProxmoxMachineTemplate struct { diff --git a/api/v1alpha2/conditions_consts.go b/api/v1alpha2/conditions_consts.go new file mode 100644 index 00000000..eacf12cd --- /dev/null +++ b/api/v1alpha2/conditions_consts.go @@ -0,0 +1,118 @@ +/* +Copyright 2023 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + +const ( + // VMProvisionedCondition documents the status of the provisioning of a ProxmoxMachine and its underlying ProxmoxVM. + VMProvisionedCondition clusterv1.ConditionType = "VMProvisioned" + + // VMProvisionFailedReason used for failures during instance provisioning. + VMProvisionFailedReason = "VMProvisionFailed" + + // VMTerminatedReason used when vm is being terminated. + VMTerminatedReason = "VMTerminated" + + // WaitingForClusterInfrastructureReason (Severity=Info) documents a ProxmoxMachine waiting for the cluster + // infrastructure to be ready before starting the provisioning process. + // + // NOTE: This reason does not apply to ProxmoxVM (this state happens before the ProxmoxVM is actually created). + WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" + + // WaitingForBootstrapDataReason (Severity=Info) documents a ProxmoxMachine waiting for the bootstrap + // script to be ready before starting the provisioning process. + // + // NOTE: This reason does not apply to ProxmoxVM (this state happens before the ProxmoxVM is actually created). 
+ WaitingForBootstrapDataReason = "WaitingForBootstrapData" + + // WaitingForBootstrapReadyReason (Severity=Info) documents a ProxmoxMachine waiting for the underlying + // Cluster-API machine to be ready. + // bootstrapping ISO. + WaitingForBootstrapReadyReason = "WaitingForBootstrapReady" + + // WaitingForCloudInitReason (Severity=Info) documents a ProxmoxVM waiting for CloudInit. + WaitingForCloudInitReason = "WaitingForCloudInit" + + // WaitingForClusterAPIMachineAddressesReason (Severity=Info) documents a machine waiting for the assignment of + // host addresses for Cluster API. + WaitingForClusterAPIMachineAddressesReason = "WaitingForClusterAPIMachineAddresses" + + // WaitingForVMPowerUpReason (Severity=Info) documents a ProxmoxVM waiting for Proxmox to power it on. + WaitingForVMPowerUpReason = "WaitingForVMPowerUp" + + // WaitingForBootstrapDataReconcilationReason (Severity=Info) documents a ProxmoxVM waiting for the + // reconciliation of Bootstrap Data for cloud-init/ignition. + WaitingForBootstrapDataReconcilationReason = "WaitingForBootstrapDataReconcilation" + + // WaitingForStaticIPAllocationReason (Severity=Info) documents a ProxmoxVM waiting for the allocation of + // a static IP address. + WaitingForStaticIPAllocationReason = "WaitingForStaticIPAllocation" + + // WaitingForDiskReconcilationReason (Severity=Info) documents a ProxmoxVM waiting for the disks to resize. + WaitingForDiskReconcilationReason = "WaitingForDiskReconcilation" + + // WaitingForVirtualMachineConfigReason (Severity=Info) documents a ProxmoxVM waiting for VirtualMachineConfig. + WaitingForVirtualMachineConfigReason = "WaitingForVirtualMachineConfig" + + // CloningReason documents (Severity=Info) a ProxmoxMachine/ProxmoxVM currently executing the clone operation. 
+ CloningReason = "Cloning" + + // CloningFailedReason (Severity=Warning) documents a ProxmoxMachine/ProxmoxVM controller detecting + // an error while provisioning; those kinds of errors are usually transient and failed provisioning + // are automatically re-tried by the controller. + CloningFailedReason = "CloningFailed" + + // PoweringOnReason documents (Severity=Info) a ProxmoxMachine/ProxmoxVM currently executing the power on sequence. + PoweringOnReason = "PoweringOn" + + // PoweringOnFailedReason (Severity=Warning) documents a ProxmoxMachine/ProxmoxVM controller detecting + // an error while powering on; those kinds of errors are usually transient and failed provisioning + // are automatically re-tried by the controller. + PoweringOnFailedReason = "PoweringOnFailed" + + // VMProvisionStarted used for starting vm provisioning. + VMProvisionStarted = "VMProvisionStarted" + + // TaskFailure (Severity=Warning) documents a ProxmoxMachine/Proxmox task failure; the reconcile loop will automatically + // retry the operation, but a user intervention might be required to fix the problem. + TaskFailure = "TaskFailure" + + // WaitingForNetworkAddressesReason (Severity=Info) documents a ProxmoxMachine waiting for the machine network + // settings to be reported after machine being powered on. + // + // NOTE: This reason does not apply to ProxmoxVM (this state happens after the ProxmoxVM is in ready state). + WaitingForNetworkAddressesReason = "WaitingForNetworkAddresses" + + // NotFoundReason (Severity=Warning) documents the ProxmoxVM not found. + NotFoundReason = "NotFound" + + // UnknownReason (Severity=Warning) documents the ProxmoxVM Unknown. + UnknownReason = "Unknown" + + // MissingControlPlaneEndpointReason (Severity=Warning) documents the missing Control Plane endpoint when Cluster is backed by an externally managed Control Plane. 
+ MissingControlPlaneEndpointReason = "MissingControlPlaneEndpoint" +) + +const ( + // ProxmoxClusterReady documents the status of ProxmoxCluster and its underlying resources. + ProxmoxClusterReady clusterv1.ConditionType = "ClusterReady" + + // ProxmoxUnreachableReason (Severity=Error) documents a controller detecting + // issues with Proxmox reachability. + ProxmoxUnreachableReason = "ProxmoxUnreachable" +) diff --git a/api/v1alpha2/groupversion_info.go b/api/v1alpha2/groupversion_info.go new file mode 100644 index 00000000..d3dc23f7 --- /dev/null +++ b/api/v1alpha2/groupversion_info.go @@ -0,0 +1,49 @@ +/* +Copyright 2023-2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha2 contains API Schema definitions for the infrastructure v1alpha2 API group +// +kubebuilder:object:generate=true +// +groupName=infrastructure.cluster.x-k8s.io +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha2"} + + // schemeBuilder is used to add go types to the GroupVersionKind scheme. + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = schemeBuilder.AddToScheme + + // In order to reduce dependencies for API package consumers, CAPI has diverged from the default kubebuilder scheme builder. + // This new pattern may also be useful for reducing dependencies in provider API packages. + // For more information see the implementers guide. + // https://main.cluster-api.sigs.k8s.io/developer/providers/implementers-guide/create_api#registering-apis-in-the-scheme + objectTypes = []runtime.Object{} +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/api/v1alpha2/proxmoxcluster_types.go b/api/v1alpha2/proxmoxcluster_types.go new file mode 100644 index 00000000..3f7214ba --- /dev/null +++ b/api/v1alpha2/proxmoxcluster_types.go @@ -0,0 +1,427 @@ +/* +Copyright 2023-2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // ProxmoxClusterKind is the ProxmoxCluster kind. + ProxmoxClusterKind = "ProxmoxCluster" + // ClusterFinalizer allows cleaning up resources associated with a + // ProxmoxCluster before removing it from the apiserver. 
+ ClusterFinalizer = "proxmoxcluster.infrastructure.cluster.x-k8s.io" + // SecretFinalizer is the finalizer for ProxmoxCluster credentials secrets. + SecretFinalizer = "proxmoxcluster.infrastructure.cluster.x-k8s.io/secret" //nolint:gosec +) + +// ProxmoxClusterSpec defines the desired state of a ProxmoxCluster. +type ProxmoxClusterSpec struct { + // controlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + // +kubebuilder:validation:XValidation:rule="self.port > 0 && self.port < 65536",message="port must be within 1-65535" + ControlPlaneEndpoint *clusterv1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"` + + // externalManagedControlPlane can be enabled to allow externally managed Control Planes to patch the + // Proxmox cluster with the Load Balancer IP provided by Control Plane provider. + // +optional + // +kubebuilder:default=false + ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"` + + // allowedNodes specifies all Proxmox nodes which will be considered + // for operations. This implies that VMs can be cloned on different nodes from + // the node which holds the VM template. + // +listType=set + // +optional + AllowedNodes []string `json:"allowedNodes,omitempty"` + + // schedulerHints allows to influence the decision on where a VM will be scheduled. For example by applying a multiplicator + // to a node's resources, to allow for overprovisioning or to ensure a node will always have a safety buffer. + // +optional + SchedulerHints *SchedulerHints `json:"schedulerHints,omitempty"` + + // ipv4Config contains information about available IPv4 address pools and the gateway. + // This can be combined with ipv6Config in order to enable dual stack. + // Either IPv4Config or IPv6Config must be provided. 
+ // +optional + // +kubebuilder:validation:XValidation:rule="self.addresses.size() > 0",message="IPv4Config addresses must be provided" + IPv4Config *IPConfigSpec `json:"ipv4Config,omitempty"` + + // ipv6Config contains information about available IPv6 address pools and the gateway. + // This can be combined with ipv4Config in order to enable dual stack. + // Either IPv4Config or IPv6Config must be provided. + // +optional + // +kubebuilder:validation:XValidation:rule="self.addresses.size() > 0",message="IPv6Config addresses must be provided" + IPv6Config *IPConfigSpec `json:"ipv6Config,omitempty"` + + // dnsServers contains information about nameservers used by the machines. + // +required + // +listType=set + // +kubebuilder:validation:MinItems=1 + DNSServers []string `json:"dnsServers,omitempty"` + + // cloneSpec is the configuration pertaining to all items configurable + // in the configuration and cloning of a proxmox VM. Multiple types of nodes can be specified. + // +optional + CloneSpec *ProxmoxClusterCloneSpec `json:"cloneSpec,omitempty"` + + // credentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not + // supplied then the credentials of the controller will be used. + // if no namespace is provided, the namespace of the ProxmoxCluster will be used. + // +optional + CredentialsRef *corev1.SecretReference `json:"credentialsRef,omitempty"` +} + +// ProxmoxClusterCloneSpec is the configuration pertaining to all items configurable +// in the configuration and cloning of a proxmox VM. +type ProxmoxClusterCloneSpec struct { + // machineSpec is the map of machine specs + // +kubebuilder:validation:XValidation:rule="has(self.controlPlane)",message="Cowardly refusing to deploy cluster without control plane" + ProxmoxMachineSpec map[string]ProxmoxMachineSpec `json:"machineSpec"` + + // sshAuthorizedKeys contains the authorized keys deployed to the PROXMOX VMs. 
+ // +listType=set + // +optional + SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"` + + // virtualIPNetworkInterface is the interface the k8s control plane binds to. + // +optional + VirtualIPNetworkInterface *string `json:"virtualIPNetworkInterface,omitempty"` +} + +// IPConfigSpec contains information about available IP config. +type IPConfigSpec struct { + // addresses is a list of IP addresses that can be assigned. This set of addresses can be non-contiguous. + // +required + // +listType=set + Addresses []string `json:"addresses,omitempty"` + + // prefix is the network prefix to use. + // +required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=128 + Prefix int32 `json:"prefix,omitempty"` + + // gateway is the network gateway + // +required + // +kubebuilder:validation:MinLength=1 + Gateway string `json:"gateway,omitempty"` + + // metric is the route priority applied to the default gateway + // +required + // +kubebuilder:default=100 + // +kubebuilder:validation:Minimum=0 + Metric *int32 `json:"metric,omitempty"` +} + +// SchedulerHints allows to pass the scheduler instructions to (dis)allow over- or enforce underprovisioning of resources. +type SchedulerHints struct { + // memoryAdjustment allows to adjust a node's memory by a given percentage. + // For example, setting it to 300 allows to allocate 300% of a host's memory for VMs, + // and setting it to 95 limits memory allocation to 95% of a host's memory. + // Setting it to 0 entirely disables scheduling memory constraints. + // By default 100% of a node's memory will be used for allocation. + // +kubebuilder:validation:Minimum=0 + // +optional + MemoryAdjustment *int64 `json:"memoryAdjustment,omitempty"` +} + +// GetMemoryAdjustment returns the memory adjustment percentage to use within the scheduler. 
+func (sh *SchedulerHints) GetMemoryAdjustment() int64 { + memoryAdjustment := int64(100) + + if sh != nil { + memoryAdjustment = ptr.Deref(sh.MemoryAdjustment, 100) + } + + return memoryAdjustment +} + +// ProxmoxClusterStatus defines the observed state of a ProxmoxCluster. +type ProxmoxClusterStatus struct { + // ready indicates that the cluster is ready. + // +kubebuilder:default=false + // +optional + Ready *bool `json:"ready,omitempty"` + + // inClusterIpPoolRef is the reference to the created in-cluster IP pool. + // +listType=atomic + // +optional + InClusterIPPoolRef []corev1.LocalObjectReference `json:"inClusterIpPoolRef,omitempty"` + + // nodeLocations keeps track of which nodes have been selected + // for different machines. + // +optional + NodeLocations *NodeLocations `json:"nodeLocations,omitempty"` + + // failureReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of ProxmoxCluster + // can be added as events to the ProxmoxCluster object and/or logged in the + // controller's output. 
+ // +optional + FailureReason *errors.ClusterStatusError `json:"failureReason,omitempty"` + + // failureMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of ProxmoxMachines + // can be added as events to the ProxmoxCluster object and/or logged in the + // controller's output. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // conditions defines current service state of the ProxmoxCluster. + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// NodeLocations holds information about the deployment state of +// control plane and worker nodes in Proxmox. +type NodeLocations struct { + // controlPlane contains all deployed control plane nodes. + // +optional + // +listType=atomic + ControlPlane []NodeLocation `json:"controlPlane,omitempty"` + + // workers contains all deployed worker nodes. + // +optional + // +listType=atomic + Workers []NodeLocation `json:"workers,omitempty"` +} + +// NodeLocation holds information about a single VM +// in Proxmox. +type NodeLocation struct { + // machine is the reference to the ProxmoxMachine that the node is on. + // +required + Machine corev1.LocalObjectReference `json:"machine,omitempty"` + + // node is the Proxmox node. 
+ // +kubebuilder:validation:MinLength=1 + // +required + Node string `json:"node,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=proxmoxclusters,scope=Namespaced,categories=cluster-api,singular=proxmoxcluster +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels['cluster\\.x-k8s\\.io/cluster-name']",description="Cluster" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready" +// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="API Endpoint" + +// ProxmoxCluster is the Schema for the proxmoxclusters API. +type ProxmoxCluster struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the Proxmox Cluster spec + // +kubebuilder:validation:XValidation:rule="self.ipv4Config != null || self.ipv6Config != null",message="at least one ip config must be set, either ipv4Config or ipv6Config" + // +required + Spec ProxmoxClusterSpec `json:"spec,omitzero"` + + // status is the Proxmox Cluster status + Status ProxmoxClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProxmoxClusterList contains a list of ProxmoxCluster. +type ProxmoxClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProxmoxCluster `json:"items"` +} + +// GetConditions returns the observations of the operational state of the ProxmoxCluster resource. 
+func (c *ProxmoxCluster) GetConditions() clusterv1.Conditions { + return c.Status.Conditions +} + +// SetConditions sets the underlying service state of the ProxmoxCluster to the predescribed clusterv1.Conditions. +func (c *ProxmoxCluster) SetConditions(conditions clusterv1.Conditions) { + c.Status.Conditions = conditions +} + +// SetInClusterIPPoolRef will set the reference to the provided InClusterIPPool. +// If nil was provided, the status field will be cleared. +func (c *ProxmoxCluster) SetInClusterIPPoolRef(pool client.Object) { + if pool == nil || pool.GetName() == "" { + c.Status.InClusterIPPoolRef = nil + return + } + + if c.Status.InClusterIPPoolRef == nil { + c.Status.InClusterIPPoolRef = []corev1.LocalObjectReference{ + {Name: pool.GetName()}, + } + } + + found := false + for _, ref := range c.Status.InClusterIPPoolRef { + if ref.Name == pool.GetName() { + found = true + } + } + if !found { + c.Status.InClusterIPPoolRef = append(c.Status.InClusterIPPoolRef, corev1.LocalObjectReference{Name: pool.GetName()}) + } +} + +// AddNodeLocation will add a node location to either the control plane or worker +// node locations based on the isControlPlane parameter. +func (c *ProxmoxCluster) AddNodeLocation(loc NodeLocation, isControlPlane bool) { + if c.Status.NodeLocations == nil { + c.Status.NodeLocations = new(NodeLocations) + } + + if !c.HasMachine(loc.Machine.Name, isControlPlane) { + c.addNodeLocation(loc, isControlPlane) + } +} + +// RemoveNodeLocation removes a node location from the status. +func (c *ProxmoxCluster) RemoveNodeLocation(machineName string, isControlPlane bool) { + nodeLocations := c.Status.NodeLocations + + if nodeLocations == nil { + return + } + + if !c.HasMachine(machineName, isControlPlane) { + return + } + + if isControlPlane { + for i, v := range nodeLocations.ControlPlane { + if v.Machine.Name == machineName { + nodeLocations.ControlPlane = append(nodeLocations.ControlPlane[:i], nodeLocations.ControlPlane[i+1:]...) 
+ } + } + return + } + + for i, v := range nodeLocations.Workers { + if v.Machine.Name == machineName { + nodeLocations.Workers = append(nodeLocations.Workers[:i], nodeLocations.Workers[i+1:]...) + } + } +} + +// UpdateNodeLocation will update the node location based on the provided machine name. +// If the node location does not exist, it will be added. +// +// The function returns true if the value was added or updated, otherwise false. +func (c *ProxmoxCluster) UpdateNodeLocation(machineName, node string, isControlPlane bool) bool { + if !c.HasMachine(machineName, isControlPlane) { + loc := NodeLocation{ + Node: node, + Machine: corev1.LocalObjectReference{Name: machineName}, + } + c.AddNodeLocation(loc, isControlPlane) + return true + } + + locations := c.Status.NodeLocations.Workers + if isControlPlane { + locations = c.Status.NodeLocations.ControlPlane + } + + for i, loc := range locations { + if loc.Machine.Name == machineName { + if loc.Node != node { + locations[i].Node = node + return true + } + + return false + } + } + + return false +} + +// HasMachine returns if true if a machine was found on any node. +func (c *ProxmoxCluster) HasMachine(machineName string, isControlPlane bool) bool { + return c.GetNode(machineName, isControlPlane) != "" +} + +// GetNode tries to return the Proxmox node for the provided machine name. 
+func (c *ProxmoxCluster) GetNode(machineName string, isControlPlane bool) string { + if c.Status.NodeLocations == nil { + return "" + } + + if isControlPlane { + for _, cpl := range c.Status.NodeLocations.ControlPlane { + if cpl.Machine.Name == machineName { + return cpl.Node + } + } + } else { + for _, wloc := range c.Status.NodeLocations.Workers { + if wloc.Machine.Name == machineName { + return wloc.Node + } + } + } + + return "" +} + +func (c *ProxmoxCluster) addNodeLocation(loc NodeLocation, isControlPlane bool) { + if isControlPlane { + c.Status.NodeLocations.ControlPlane = append(c.Status.NodeLocations.ControlPlane, loc) + return + } + + c.Status.NodeLocations.Workers = append(c.Status.NodeLocations.Workers, loc) +} + +func init() { + objectTypes = append(objectTypes, &ProxmoxCluster{}, &ProxmoxClusterList{}) +} diff --git a/api/v1alpha2/proxmoxcluster_types_test.go b/api/v1alpha2/proxmoxcluster_types_test.go new file mode 100644 index 00000000..0704a55c --- /dev/null +++ b/api/v1alpha2/proxmoxcluster_types_test.go @@ -0,0 +1,254 @@ +/* +Copyright 2023-2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestUpdateNodeLocation(t *testing.T) { + cl := ProxmoxCluster{ + Status: ProxmoxClusterStatus{}, + } + + res := cl.UpdateNodeLocation("new", "n1", false) + require.NotNil(t, cl.Status.NodeLocations) + require.Len(t, cl.Status.NodeLocations.Workers, 1) + require.True(t, res) + + locs := &NodeLocations{ + Workers: []NodeLocation{ + { + Machine: corev1.LocalObjectReference{Name: "m1"}, + Node: "n1", + }, + { + Machine: corev1.LocalObjectReference{Name: "m2"}, + Node: "n2", + }, + { + Machine: corev1.LocalObjectReference{Name: "m3"}, + Node: "n3", + }, + }, + } + + cl.Status.NodeLocations = locs + + res = cl.UpdateNodeLocation("m1", "n2", false) + require.True(t, res) + require.Len(t, cl.Status.NodeLocations.Workers, 3) + require.Equal(t, cl.Status.NodeLocations.Workers[0].Node, "n2") + + res = cl.UpdateNodeLocation("m4", "n4", false) + require.True(t, res) + require.Len(t, cl.Status.NodeLocations.Workers, 4) + require.Equal(t, cl.Status.NodeLocations.Workers[3].Node, "n4") + + res = cl.UpdateNodeLocation("m2", "n2", false) + require.False(t, res) + require.Len(t, cl.Status.NodeLocations.Workers, 4) +} + +func defaultCluster() *ProxmoxCluster { + return &ProxmoxCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: metav1.NamespaceDefault, + }, + Spec: ProxmoxClusterSpec{ + IPv4Config: &IPConfigSpec{ + Addresses: []string{"10.0.0.0/24"}, + Prefix: 24, + Gateway: "10.0.0.254", + Metric: ptr.To(int32(123)), + }, + DNSServers: []string{"1.2.3.4"}, + CloneSpec: &ProxmoxClusterCloneSpec{ + ProxmoxMachineSpec: map[string]ProxmoxMachineSpec{ + "controlPlane": { + VirtualMachineCloneSpec: VirtualMachineCloneSpec{ 
+ TemplateSource: TemplateSource{ + SourceNode: ptr.To("pve1"), + }, + }, + }, + }, + }, + }, + } +} + +var _ = Describe("ProxmoxCluster Test", func() { + AfterEach(func() { + err := k8sClient.Delete(context.Background(), defaultCluster()) + Expect(client.IgnoreNotFound(err)).To(Succeed()) + }) + + Context("ClusterPort", func() { + It("Should not allow ports higher than 65535", func() { + dc := defaultCluster() + dc.Spec.ControlPlaneEndpoint = &clusterv1.APIEndpoint{ + Port: 65536, + } + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("port must be within 1-65535"))) + }) + + It("Should not allow port 0", func() { + dc := defaultCluster() + dc.Spec.ControlPlaneEndpoint = &clusterv1.APIEndpoint{ + Port: 0, + } + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("port must be within 1-65535"))) + }) + }) + + Context("IPv4Config", func() { + It("Should not allow empty addresses", func() { + dc := defaultCluster() + dc.Spec.IPv4Config.Addresses = []string{} + + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("spec.ipv4Config.addresses: Required value"))) + }) + + It("Should not allow prefix higher than 128", func() { + dc := defaultCluster() + dc.Spec.IPv4Config.Prefix = 129 + + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("should be less than or equal to 128"))) + }) + + It("Should not allow empty ip config", func() { + dc := defaultCluster() + dc.Spec.IPv6Config = nil + dc.Spec.IPv4Config = nil + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("at least one ip config must be set"))) + }) + }) + + It("Should not allow empty DNS servers", func() { + dc := defaultCluster() + dc.Spec.DNSServers = []string{} + + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("spec.dnsServers: Required value"))) + }) + + It("Should allow creating valid clusters", 
func() { + Expect(k8sClient.Create(context.Background(), defaultCluster())).To(Succeed()) + }) + + Context("CloneSpecs", func() { + It("Should not allow Cluster without ControlPlane nodes", func() { + dc := defaultCluster() + dc.Spec.CloneSpec.ProxmoxMachineSpec = map[string]ProxmoxMachineSpec{} + + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("control plane"))) + }) + }) + + Context("IPv6Config", func() { + It("Should not allow empty addresses", func() { + dc := defaultCluster() + dc.Spec.IPv6Config = &IPConfigSpec{ + Addresses: []string{}, + Prefix: 0, + } + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("spec.ipv6Config.addresses: Required value"))) + }) + + It("Should not allow prefix higher than 128", func() { + dc := defaultCluster() + dc.Spec.IPv6Config = &IPConfigSpec{ + Addresses: []string{}, + Prefix: 129, + } + + Expect(k8sClient.Create(context.Background(), dc)).Should(MatchError(ContainSubstring("should be less than or equal to 128"))) + }) + }) +}) + +func TestRemoveNodeLocation(t *testing.T) { + cl := ProxmoxCluster{ + Status: ProxmoxClusterStatus{NodeLocations: &NodeLocations{ + Workers: []NodeLocation{ + { + Machine: corev1.LocalObjectReference{Name: "m1"}, + Node: "n1", + }, + { + Machine: corev1.LocalObjectReference{Name: "m2"}, + Node: "n2", + }, + { + Machine: corev1.LocalObjectReference{Name: "m3"}, + Node: "n3", + }, + }, + }}, + } + + cl.RemoveNodeLocation("m1", false) + require.NotNil(t, cl.Status.NodeLocations) + require.Len(t, cl.Status.NodeLocations.Workers, 2) + + cl.RemoveNodeLocation("m1", false) + require.Len(t, cl.Status.NodeLocations.Workers, 2) + require.Equal(t, cl.Status.NodeLocations.Workers[0].Node, "n2") + + cl.UpdateNodeLocation("m4", "n4", true) + require.Len(t, cl.Status.NodeLocations.ControlPlane, 1) + + cl.RemoveNodeLocation("m4", true) + require.Len(t, cl.Status.NodeLocations.ControlPlane, 0) +} + +func TestSetInClusterIPPoolRef(t 
*testing.T) { + cl := defaultCluster() + + cl.SetInClusterIPPoolRef(nil) + require.Nil(t, cl.Status.InClusterIPPoolRef) + + pool := &ipamicv1.InClusterIPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: metav1.NamespaceDefault, + }, + Spec: ipamicv1.InClusterIPPoolSpec{ + Addresses: []string{"10.10.10.2/24"}, + Prefix: 24, + Gateway: "10.10.10.1", + }, + } + + cl.SetInClusterIPPoolRef(pool) + require.Equal(t, cl.Status.InClusterIPPoolRef[0].Name, pool.GetName()) + + cl.SetInClusterIPPoolRef(pool) + require.Equal(t, cl.Status.InClusterIPPoolRef[0].Name, pool.GetName()) +} diff --git a/api/v1alpha2/proxmoxclustertemplate_types.go b/api/v1alpha2/proxmoxclustertemplate_types.go new file mode 100644 index 00000000..a1663b9f --- /dev/null +++ b/api/v1alpha2/proxmoxclustertemplate_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// ProxmoxClusterTemplateSpec defines the desired state of ProxmoxClusterTemplate. +type ProxmoxClusterTemplateSpec struct { + // template is the Proxmox Cluster template + // +required + Template ProxmoxClusterTemplateResource `json:"template,omitzero"` +} + +// ProxmoxClusterTemplateResource defines the spec and metadata for ProxmoxClusterTemplate supported by capi. +type ProxmoxClusterTemplateResource struct { + // metadata is the standard object metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta *clusterv1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the Proxmox Cluster spec + // +required + Spec ProxmoxClusterSpec `json:"spec,omitzero"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=proxmoxclustertemplates,scope=Namespaced,categories=cluster-api,shortName=pct +// +kubebuilder:storageversion + +// ProxmoxClusterTemplate is the Schema for the proxmoxclustertemplates API. +type ProxmoxClusterTemplate struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the Proxmox Cluster Template spec + // +required + Spec ProxmoxClusterTemplateSpec `json:"spec,omitzero"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// ProxmoxClusterTemplateList contains a list of ProxmoxClusterTemplate. +type ProxmoxClusterTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProxmoxClusterTemplate `json:"items"` +} + +func init() { + objectTypes = append(objectTypes, &ProxmoxClusterTemplate{}, &ProxmoxClusterTemplateList{}) +} diff --git a/api/v1alpha2/proxmoxmachine_types.go b/api/v1alpha2/proxmoxmachine_types.go new file mode 100644 index 00000000..f6eca2da --- /dev/null +++ b/api/v1alpha2/proxmoxmachine_types.go @@ -0,0 +1,638 @@ +/* +Copyright 2023-2025 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha2
+
+import (
+	"fmt"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterapierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck
+)
+
+const (
+	// ProxmoxMachineKind is the ProxmoxMachine kind.
+	ProxmoxMachineKind = "ProxmoxMachine"
+
+	// MachineFinalizer allows cleaning up resources associated with a
+	// ProxmoxMachine before removing it from the API Server.
+	MachineFinalizer = "proxmoxmachine.infrastructure.cluster.x-k8s.io"
+
+	// DefaultReconcilerRequeue is the default value for the reconcile retry.
+	DefaultReconcilerRequeue = 10 * time.Second
+
+	// DefaultNetworkDevice is the default network device name.
+	DefaultNetworkDevice = "net0"
+
+	// DefaultSuffix is the default suffix for the network device.
+	DefaultSuffix = "inet"
+
+	// IPv4Format is the IP v4 format.
+	IPv4Format = "v4"
+
+	// IPv6Format is the IP v6 format.
+	IPv6Format = "v6"
+)
+
+// ProxmoxMachineChecks defines possible checks to skip.
+type ProxmoxMachineChecks struct {
+	// skipCloudInitStatus skips checking the CloudInit status, which can be useful with specific operating systems like Talos.
+	// +optional
+	SkipCloudInitStatus *bool `json:"skipCloudInitStatus,omitempty"`
+	// skipQemuGuestAgent skips checking QEMU Agent readiness, which can be useful with specific operating systems like Talos.
+	// +optional
+	SkipQemuGuestAgent *bool `json:"skipQemuGuestAgent,omitempty"`
+}
+
+// ProxmoxMachineSpec defines the desired state of a ProxmoxMachine.
+type ProxmoxMachineSpec struct {
+	VirtualMachineCloneSpec `json:",inline"`
+
+	// providerID is the virtual machine BIOS UUID formatted as
+	// proxmox://6c3fa683-bef9-4425-b413-eaa45a9d6191
+	// +optional
+	ProviderID *string `json:"providerID,omitempty"`
+
+	// virtualMachineID is the Proxmox identifier for the ProxmoxMachine VM.
+	// +optional
+	VirtualMachineID *int64 `json:"virtualMachineID,omitempty"`
+
+	// numSockets is the number of CPU sockets in a virtual machine.
+	// Defaults to the property value in the template from which the virtual machine is cloned.
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	NumSockets *int32 `json:"numSockets,omitempty"`
+
+	// numCores is the number of cores per CPU socket in a virtual machine.
+	// Defaults to the property value in the template from which the virtual machine is cloned.
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	NumCores *int32 `json:"numCores,omitempty"`
+
+	// memoryMiB is the size of a virtual machine's memory, in MiB.
+	// Defaults to the property value in the template from which the virtual machine is cloned.
+	// +kubebuilder:validation:MultipleOf=8
+	// +optional
+	MemoryMiB *int32 `json:"memoryMiB,omitempty"`
+
+	// disks contains a set of disk configuration options,
+	// which will be applied before the first startup.
+	//
+	// +optional
+	Disks *Storage `json:"disks,omitempty"`
+
+	// network is the network configuration for this machine's VM.
+	// +optional
+	Network *NetworkSpec `json:"network,omitempty"`
+
+	// vmIDRange is the range of VMIDs to use for VMs.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self.end >= self.start",message="end should be greater than or equal to start"
+	VMIDRange *VMIDRange `json:"vmIDRange,omitempty"`
+
+	// checks defines possible checks to skip.
+	// +optional
+	Checks *ProxmoxMachineChecks `json:"checks,omitempty"`
+
+	// metadataSettings defines the metadata settings for this machine's VM.
+	// +optional
+	MetadataSettings *MetadataSettings `json:"metadataSettings,omitempty"`
+
+	// allowedNodes specifies all Proxmox nodes which will be considered
+	// for operations. This implies that VMs can be cloned on different nodes from
+	// the node which holds the VM template.
+	//
+	// This field is optional and should only be set if you want to restrict
+	// the nodes where the VM can be cloned.
+	// If not set, the ProxmoxCluster will be used to determine the nodes.
+	// +optional
+	// +listType=set
+	AllowedNodes []string `json:"allowedNodes,omitempty"`
+
+	// tags is a list of tags to be applied to the virtual machine.
+	// +optional
+	// +immutable
+	// +listType=set
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:items:Pattern=`^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$`
+	Tags []string `json:"tags,omitempty"`
+}
+
+// Storage is the physical storage on the node.
+type Storage struct {
+	// bootVolume defines the storage size for the boot volume.
+	// This field is optional, and should only be set if you want
+	// to change the size of the boot volume.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable"
+	// +optional
+	BootVolume *DiskSize `json:"bootVolume,omitempty"`
+
+	// TODO Intended to add handling for additional volumes,
+	// which will be added to the node.
+	// e.g. AdditionalVolumes []DiskSize.
+}
+
+// DiskSize contains values for the disk device and size.
+type DiskSize struct {
+	// disk is the name of the disk device that should be resized.
+	// Example values are: ide[0-3], scsi[0-30], sata[0-5].
+	// +kubebuilder:validation:MinLength=1
+	// +required
+	Disk string `json:"disk,omitempty"`
+
+	// sizeGb defines the size in gigabytes.
+	//
+	// As Proxmox does not support shrinking, the size
+	// must be bigger than the already configured size in the
+	// template.
+ // + // +kubebuilder:validation:Minimum=5 + // +required + SizeGB int32 `json:"sizeGb,omitempty"` +} + +// TargetFileStorageFormat the target format of the cloned disk. +type TargetFileStorageFormat string + +// Supported disk formats. +const ( + TargetStorageFormatRaw TargetFileStorageFormat = "raw" + TargetStorageFormatQcow2 TargetFileStorageFormat = "qcow2" + TargetStorageFormatVmdk TargetFileStorageFormat = "vmdk" +) + +// TemplateSource defines the source of the template VM. +type TemplateSource struct { + // sourceNode is the initially selected proxmox node. + // This node will be used to locate the template VM, which will + // be used for cloning operations. + // + // Cloning will be performed according to the configuration. + // Setting the `Target` field will tell Proxmox to clone the + // VM on that target node. + // + // When Target is not set and the ProxmoxCluster contains + // a set of `AllowedNodes`, the algorithm will instead evenly + // distribute the VMs across the nodes from that list. + // + // If neither a `Target` nor `AllowedNodes` was set, the VM + // will be cloned onto the same node as SourceNode. + // + // +kubebuilder:validation:MinLength=1 + // +optional + SourceNode *string `json:"sourceNode,omitempty"` + + // templateID the vm_template vmid used for cloning a new VM. + // +optional + TemplateID *int32 `json:"templateID,omitempty"` + + // templateSelector defines MatchTags for looking up VM templates. + // +optional + TemplateSelector *TemplateSelector `json:"templateSelector,omitempty"` +} + +// VirtualMachineCloneSpec is information used to clone a virtual machine. +// +kubebuilder:validation:XValidation:rule="self.full || !has(self.format)",message="Must set full=true when specifying format" +// +kubebuilder:validation:XValidation:rule="self.full || !has(self.storage)",message="Must set full=true when specifying storage" +type VirtualMachineCloneSpec struct { + TemplateSource `json:",inline"` + + // description for the new VM. 
+ // +optional + Description *string `json:"description,omitempty"` + + // format for file storage. Only valid for full clone. + // +kubebuilder:validation:Enum=raw;qcow2;vmdk + // +optional + Format *TargetFileStorageFormat `json:"format,omitempty"` + + // full Create a full copy of all disks. + // This is always done when you clone a normal VM. + // Create a Full clone by default. + // +kubebuilder:default=true + // +optional + Full *bool `json:"full,omitempty"` + + // pool Add the new VM to the specified pool. + // +optional + Pool *string `json:"pool,omitempty"` + + // snapName The name of the snapshot. + // +optional + SnapName *string `json:"snapName,omitempty"` + + // storage for full clone. + // +optional + Storage *string `json:"storage,omitempty"` + + // target node. Only allowed if the original VM is on shared storage. + // +optional + Target *string `json:"target,omitempty"` +} + +// TemplateSelector defines MatchTags for looking up VM templates. +type TemplateSelector struct { + // matchTags specifies all tags to look for when looking up the VM template. + // Passed tags must be an exact 1:1 match with the tags on the template you want to use. + // If multiple VM templates with the same set of tags are found, provisioning will fail. + // + // +listType=set + // +kubebuilder:validation:items:Pattern=`^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$` + // +kubebuilder:validation:MinItems=1 + // +required + MatchTags []string `json:"matchTags,omitempty"` +} + +// NetworkSpec defines the virtual machine's network configuration. +type NetworkSpec struct { + // networkDevices lists network devices. + // net0 is always the default device. + // +optional + // +listType=map + // +listMapKey=name + NetworkDevices []NetworkDevice `json:"networkDevices,omitempty"` + + // VirtualNetworkDevices defines virtual network devices (e.g. bridges, vlans ...). + VirtualNetworkDevices `json:",inline"` +} + +// InterfaceConfig contains all configurables a network interface can have. 
+type InterfaceConfig struct { + // ipPoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses. + // The network device will use an available IP address from the referenced pool. + // This can be combined with `IPv6PoolRef` in order to enable dual stack. + // +optional + // +kubebuilder:validation:items:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipPoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:items:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipPoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +listType=atomic + IPPoolRef []corev1.TypedLocalObjectReference `json:"ipPoolRef,omitempty"` + + // dnsServers contains information about nameservers to be used for this interface. + // If this field is not set, it will use the default dns servers from the ProxmoxCluster. + // +optional + // +kubebuilder:validation:MinItems=1 + // +listType=set + DNSServers []string `json:"dnsServers,omitempty"` + + // Routing is the common spec of routes and routing policies to all interfaces and VRFs. + Routing `json:",inline"` + + // linkMtu is the network device Maximum Transmission Unit. + // +optional + LinkMTU MTU `json:"linkMtu,omitempty"` +} + +// Routing is shared fields across devices and VRFs. +type Routing struct { + // routes are the routes associated with this interface. + // +optional + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + Routes []RouteSpec `json:"routes,omitempty"` + + // routingPolicy is an interface-specific policy inserted into FIB (forwarding information base). + // +optional + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + RoutingPolicy []RoutingPolicySpec `json:"routingPolicy,omitempty"` +} + +// RouteSpec describes an IPv4/IPv6 Route. +type RouteSpec struct { + // to is the subnet to be routed. 
+	// +optional
+	To *string `json:"to,omitempty"`
+	// via is the gateway to the subnet.
+	// +optional
+	Via *string `json:"via,omitempty"`
+	// metric is the priority of the route in the routing table.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	Metric *int32 `json:"metric,omitempty"`
+	// table is the routing table used for this route.
+	// +optional
+	Table *int32 `json:"table,omitempty"`
+}
+
+// RoutingPolicySpec is a Linux FIB rule.
+type RoutingPolicySpec struct {
+	// to is the subnet of the target.
+	// +optional
+	To *string `json:"to,omitempty"`
+
+	// from is the subnet of the source.
+	// +optional
+	From *string `json:"from,omitempty"`
+
+	// table is the routing table ID.
+	// +optional
+	Table *int32 `json:"table,omitempty"`
+
+	// priority is the position in the ip rule FIB table.
+	// +kubebuilder:validation:Maximum=4294967295
+	// +kubebuilder:validation:XValidation:message="Cowardly refusing to insert FIB rule matching kernel rules",rule="(self > 0 && self < 32765) || (self > 32766)"
+	// +optional
+	Priority *int64 `json:"priority,omitempty"`
+}
+
+// VRFDevice defines Virtual Routing and Forwarding (VRF) devices.
+type VRFDevice struct {
+	// interfaces is the list of proxmox network devices managed by this virtual device.
+	// +optional
+	// +listType=set
+	Interfaces []NetName `json:"interfaces,omitempty"`
+
+	// name is the virtual network device name.
+	// Must be unique within the virtual machine.
+	// +kubebuilder:validation:MinLength=3
+	// +required
+	Name string `json:"name,omitempty"`
+
+	// table is the ID of the routing table used for the l3mdev vrf device.
+ // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + // +kubebuilder:validation:XValidation:message="Cowardly refusing to insert l3mdev rules into kernel tables",rule="(self > 0 && self < 254) || (self > 255)" + // +required + Table int32 `json:"table,omitempty"` + + // Routing is the common spec of routes and routing policies to all interfaces and VRFs. + Routing `json:",inline"` +} + +// VirtualNetworkDevices defines Linux software networking devices. +type VirtualNetworkDevices struct { + // vrfs defines VRF Devices. + // +optional + // +listType=map + // +listMapKey=name + VRFs []VRFDevice `json:"vrfs,omitempty"` +} + +// NetworkDevice defines the required details of a virtual machine network device. +type NetworkDevice struct { + // bridge is the network bridge to attach to the machine. + // +kubebuilder:validation:MinLength=1 + // +optional + Bridge *string `json:"bridge,omitempty"` + + // model is the network device model. + // +optional + // +kubebuilder:validation:Enum=e1000;virtio;rtl8139;vmxnet3 + // +kubebuilder:default=virtio + Model *string `json:"model,omitempty"` + + // mtu is the network device Maximum Transmission Unit. + // When set to 1, virtio devices inherit the MTU value from the underlying bridge. + // +optional + MTU MTU `json:"mtu,omitempty"` + + // vlan is the network L2 VLAN. + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4094 + VLAN *int32 `json:"vlan,omitempty"` + + // name is the network device name. + // +kubebuilder:default=net0 + // +optional + Name NetName `json:"name,omitempty"` + + // InterfaceConfig contains all configurables a network interface can have. + // +optional + InterfaceConfig `json:",inline"` +} + +// MTU is the network device Maximum Transmission Unit. MTUs below 1280 break IPv6. 
+// +kubebuilder:validation:XValidation:rule="self == 1 || ( self >= 576 && self <= 65520)",message="invalid MTU value" +type MTU *int32 + +// ProxmoxMachineStatus defines the observed state of a ProxmoxMachine. +type ProxmoxMachineStatus struct { + // ready indicates the Docker infrastructure has been provisioned and is ready. + // +optional + Ready *bool `json:"ready,omitempty"` + + // addresses contains the Proxmox VM instance associated addresses. + // +optional + // +listType=atomic + Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + + // vmStatus is used to identify the virtual machine status. + // +optional + VMStatus *VirtualMachineState `json:"vmStatus,omitempty"` + + // bootstrapDataProvided whether the virtual machine has an injected bootstrap data. + // +optional + BootstrapDataProvided *bool `json:"bootstrapDataProvided,omitempty"` + + // ipAddresses are the IP addresses used to access the virtual machine. + // +optional + IPAddresses map[string]*IPAddresses `json:"ipAddresses,omitempty"` + + // network returns the network status for each of the machine's configured. + // network interfaces. + // +optional + // +listType=atomic + Network []NetworkStatus `json:"network,omitempty"` + + // proxmoxNode is the name of the proxmox node, which was chosen for this + // machine to be deployed on. + // +optional + ProxmoxNode *string `json:"proxmoxNode,omitempty"` + + // taskRef is a managed object reference to a Task related to the ProxmoxMachine. + // This value is set automatically at runtime and should not be set or + // modified by users. + // +optional + TaskRef *string `json:"taskRef,omitempty"` + + // retryAfter tracks the time we can retry queueing a task. + // +optional + RetryAfter *metav1.Time `json:"retryAfter,omitempty"` + + // failureReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. 
+ // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of ProxmoxMachines + // can be added as events to the ProxmoxMachine object and/or logged in the + // controller's output. + // +optional + FailureReason *clusterapierrors.MachineStatusError `json:"failureReason,omitempty"` + + // failureMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of ProxmoxMachines + // can be added as events to the ProxmoxMachine object and/or logged in the + // controller's output. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // conditions defines current service state of the ProxmoxMachine. 
+	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+}
+
+// IPAddresses stores the IP addresses of a network interface. Used for status.
+// TODO: Rework the machine status layout.
+type IPAddresses struct {
+	// ipv4 is the list of IPv4 addresses.
+	// +listType=set
+	// +optional
+	IPv4 []string `json:"ipv4,omitempty"`
+
+	// ipv6 is the list of IPv6 addresses.
+	// +listType=set
+	// +optional
+	IPv6 []string `json:"ipv6,omitempty"`
+}
+
+// VMIDRange defines the range of VMIDs to use for VMs.
+type VMIDRange struct {
+	// start is the start of the VMID range to use for VMs.
+	// +kubebuilder:validation:Minimum=100
+	// +kubebuilder:validation:ExclusiveMinimum=false
+	// +kubebuilder:validation:Maximum=999999999
+	// +kubebuilder:validation:ExclusiveMaximum=false
+	// +required
+	Start int64 `json:"start,omitempty"`
+
+	// end is the end of the VMID range to use for VMs.
+	// Only used if start is set.
+	// +kubebuilder:validation:Minimum=100
+	// +kubebuilder:validation:ExclusiveMinimum=false
+	// +kubebuilder:validation:Maximum=999999999
+	// +kubebuilder:validation:ExclusiveMaximum=false
+	// +required
+	End int64 `json:"end,omitempty"`
+}
+
+// MetadataSettings defines the metadata settings for the machine.
+type MetadataSettings struct {
+	// providerIDInjection enables the injection of the `providerID` into the cloudinit metadata.
+	// This will basically set the `provider-id` field in the metadata to `proxmox://`.
+ // +required + ProviderIDInjection *bool `json:"providerIDInjection,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=proxmoxmachines,scope=Namespaced,categories=cluster-api;proxmox,shortName=moxm +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this ProxmoxMachine belongs" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.proxmoxNode",description="Proxmox Node that the machine was deployed on" +// +kubebuilder:printcolumn:name="Provider_ID",type="string",JSONPath=".spec.providerID",description="Provider ID" +// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this ProxmoxMachine" + +// ProxmoxMachine is the Schema for the proxmoxmachines API. +type ProxmoxMachine struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the Proxmox machine spec. + // +kubebuilder:validation:XValidation:rule="[has(self.sourceNode), has(self.templateSelector)].exists_one(c, c)",message="must define either a SourceNode with a TemplateID or a TemplateSelector" + // +kubebuilder:validation:XValidation:rule="[has(self.templateID), has(self.templateSelector)].exists_one(c, c)",message="must define either a SourceNode with a TemplateID or a TemplateSelector" + // +required + Spec *ProxmoxMachineSpec `json:"spec,omitempty"` + + // status is the status of the Proxmox machine. 
+ // +optional + Status ProxmoxMachineStatus `json:"status,omitzero"` +} + +//+kubebuilder:object:root=true + +// ProxmoxMachineList contains a list of ProxmoxMachine. +type ProxmoxMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProxmoxMachine `json:"items"` +} + +// GetConditions returns the observations of the operational state of the ProxmoxMachine resource. +func (r *ProxmoxMachine) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the ProxmoxMachine to the predescribed clusterv1.Conditions. +func (r *ProxmoxMachine) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +// GetVirtualMachineID get the Proxmox "vmid". +func (r *ProxmoxMachine) GetVirtualMachineID() int64 { + if r.Spec.VirtualMachineID != nil { + return *r.Spec.VirtualMachineID + } + return -1 +} + +// GetTemplateID get the Proxmox template "vmid" used to provision this machine. +func (r *ProxmoxMachine) GetTemplateID() int32 { + if r.Spec.TemplateID != nil { + return *r.Spec.TemplateID + } + return -1 +} + +// GetTemplateSelectorTags get the tags, the desired vm template should have. +func (r *ProxmoxMachine) GetTemplateSelectorTags() []string { + if r.Spec.TemplateSelector != nil { + return r.Spec.TemplateSelector.MatchTags + } + return nil +} + +// GetNode get the Proxmox node used to provision this machine. +func (r *ProxmoxMachine) GetNode() string { + return ptr.Deref(r.Spec.SourceNode, "") +} + +// FormatSize returns the format required for the Proxmox API. 
+func (d *DiskSize) FormatSize() string { + return fmt.Sprintf("%dG", d.SizeGB) +} + +func init() { + objectTypes = append(objectTypes, &ProxmoxMachine{}, &ProxmoxMachineList{}) +} diff --git a/api/v1alpha2/proxmoxmachine_types_test.go b/api/v1alpha2/proxmoxmachine_types_test.go new file mode 100644 index 00000000..0566df8e --- /dev/null +++ b/api/v1alpha2/proxmoxmachine_types_test.go @@ -0,0 +1,397 @@ +/* +Copyright 2023-2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "context" + "strconv" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + . 
"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" +) + +func defaultMachine() *ProxmoxMachine { + return &ProxmoxMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-machine", + Namespace: metav1.NamespaceDefault, + }, + Spec: ptr.To(ProxmoxMachineSpec{ + ProviderID: ptr.To("proxmox://abcdef"), + VirtualMachineID: ptr.To[int64](100), + VirtualMachineCloneSpec: VirtualMachineCloneSpec{ + TemplateSource: TemplateSource{ + SourceNode: ptr.To("pve1"), + TemplateID: ptr.To[int32](100), + }, + }, + Disks: &Storage{ + BootVolume: &DiskSize{ + Disk: "scsi0", + SizeGB: 100, + }, + }, + }), + } +} + +var _ = Describe("ProxmoxMachine Test", func() { + AfterEach(func() { + err := k8sClient.Delete(context.Background(), defaultMachine()) + Expect(client.IgnoreNotFound(err)).To(Succeed()) + }) + + Context("VirtualMachineCloneSpec", func() { + It("Should not allow specifying format if full clone is disabled", func() { + dm := defaultMachine() + dm.Spec.Format = ptr.To(TargetStorageFormatRaw) + dm.Spec.Full = ptr.To(false) + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Must set full=true when specifying format"))) + }) + + It("Should not allow specifying storage if full clone is disabled", func() { + dm := defaultMachine() + dm.Spec.Storage = ptr.To("local") + dm.Spec.Full = ptr.To(false) + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Must set full=true when specifying storage"))) + }) + + It("Should allow disabling full clone in absence of format and storage", func() { + dm := defaultMachine() + dm.Spec.Format = nil + dm.Spec.Storage = nil + dm.Spec.Full = ptr.To(false) + + Expect(k8sClient.Create(context.Background(), dm)).Should(Succeed()) + }) + + It("Should disallow absence of SourceNode, TemplateID, and TemplateSelector", func() { + dm := defaultMachine() + dm.Spec.TemplateSource.SourceNode = nil + dm.Spec.TemplateSource.TemplateID = nil + dm.Spec.TemplateSelector = nil + 
Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("must define either a SourceNode with a TemplateID or a TemplateSelector"))) + }) + + It("Should not allow specifying TemplateSelector together with SourceNode and/or TemplateID", func() { + dm := defaultMachine() + dm.Spec.TemplateSelector = &TemplateSelector{MatchTags: []string{"test"}} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("must define either a SourceNode with a TemplateID or a TemplateSelector"))) + }) + + It("Should not allow specifying TemplateSelector with empty MatchTags", func() { + dm := defaultMachine() + dm.Spec.TemplateSelector = &TemplateSelector{MatchTags: []string{}} + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("spec.templateSelector.matchTags: Required value"))) + }) + + It("Should only allow valid MatchTags", func() { + testCases := []struct { + tag string + expectErrror bool + errorMessage string + }{ + // Valid Tags + {"valid_tag", false, ""}, + {"Valid-Tag", false, ""}, + {"valid.tag", false, ""}, + {"VALID+TAG", false, ""}, + {"123tag", false, ""}, + {"tag123", false, ""}, + {"tag_with-hyphen", false, ""}, + {"tag.with.plus+_and-hyphen", false, ""}, + {"_tag_with_underscore", false, ""}, + + // Invalid Tags + {"", true, "in body should match"}, // Empty string + {"-invalid", true, "in body should match"}, // Starts with a hyphen + {"+invalid", true, "in body should match"}, // Starts with a plus + {".invalid", true, "in body should match"}, // Starts with a dot + {" invalid", true, "in body should match"}, // Starts with a space + {"invalid!", true, "in body should match"}, // Contains an exclamation mark + {"invalid@", true, "in body should match"}, // Contains an at symbol + {"invalid#", true, "in body should match"}, // Contains a hash symbol + {"inval id", true, "in body should match"}, // Contains a whitespace + } + + // Iterate through each test case + for i, 
testCase := range testCases { + // Create a new ProxmoxMachine object for each test case + dm := defaultMachine() + + // Set the name of the machine to a unique value based on the test case index + dm.ObjectMeta.Name = "test-machine-" + strconv.Itoa(i) + + // Set the template selector to match the tag from the test case + dm.Spec.TemplateSource.SourceNode = nil + dm.Spec.TemplateSource.TemplateID = nil + dm.Spec.TemplateSelector = &TemplateSelector{MatchTags: []string{testCase.tag}} + + // Run test + if !testCase.expectErrror { + Expect(k8sClient.Create(context.Background(), dm)).To(Succeed()) + } else { + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring(testCase.errorMessage))) + } + } + }) + }) + + Context("Disks", func() { + It("Should not allow updates to disks", func() { + dm := defaultMachine() + Expect(k8sClient.Create(context.Background(), dm)).To(Succeed()) + dm.Spec.Disks.BootVolume.SizeGB = 50 + Expect(k8sClient.Update(context.Background(), dm)).Should(MatchError(ContainSubstring("is immutable"))) + }) + + It("Should not allow negative or less than minimum values", func() { + dm := defaultMachine() + + dm.Spec.Disks.BootVolume.SizeGB = -10 + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("greater than or equal to 5"))) + + dm.Spec.Disks.BootVolume.SizeGB = 4 + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("greater than or equal to 5"))) + }) + }) + + Context("Network", func() { + It("Should set default bridge", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To(""), + }}, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be at least 1 chars long"))) + }) + + It("Should not allow net0 in additional network devices", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + 
Bridge: ptr.To("vmbr0"), + }, { + Name: ptr.To("net0"), + InterfaceConfig: InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: "some-pool", + }}, + }, + }}, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("spec.network.networkDevices[1]: Duplicate value"))) + }) + + It("Should only allow IPAM pool resources in IPPoolRef apiGroup", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To("vmbr0"), + Name: ptr.To("net1"), + InterfaceConfig: InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{{ + APIGroup: ptr.To("apps"), + Name: "some-app", + }}, + }, + }}, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("ipPoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io"))) + }) + + It("Should only allow IPAM pool resources in IPPoolRef kind", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To("vmbr0"), + Name: ptr.To("net1"), + InterfaceConfig: InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "ConfigMap", + Name: "some-app", + }}, + }, + }}, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("ipPoolRef allows either InClusterIPPool or GlobalInClusterIPPool"))) + }) + + It("Should not allow machine with network device mtu less than 1", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To("vmbr0"), + MTU: ptr.To(int32(0)), + }}, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("invalid MTU value"))) + }) + + It("Should not allow machine with network device mtu greater than 65520", func() { + dm := defaultMachine() + dm.Spec.Network 
= &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To("vmbr0"), + MTU: ptr.To(int32(65521)), + }}, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("invalid MTU value"))) + }) + + It("Should only allow VRFS with a non kernel routing table ", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + VirtualNetworkDevices: VirtualNetworkDevices{ + VRFs: []VRFDevice{{ + Name: "vrf-blue", + Table: 254, + }}, + }, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Cowardly refusing to insert l3mdev rules into kernel tables"))) + }) + + It("Should only allow non kernel FIB rule priority", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + VirtualNetworkDevices: VirtualNetworkDevices{ + VRFs: []VRFDevice{{ + Name: "vrf-blue", + Table: 100, + Routing: Routing{ + RoutingPolicy: []RoutingPolicySpec{{ + Priority: ptr.To(int64(32766)), + }}, + }, + }}, + }, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Cowardly refusing to insert FIB rule matching kernel rules"))) + }) + + It("Should not allow machine with network device vlan equal to 0", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To("vmbr0"), + VLAN: ptr.To(int32(0)), + }}, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be greater than or equal to 1"))) + }) + + It("Should not allow machine with network device vlan greater than 4094", func() { + dm := defaultMachine() + dm.Spec.Network = &NetworkSpec{ + NetworkDevices: []NetworkDevice{{ + Bridge: ptr.To("vmbr0"), + VLAN: ptr.To(int32(4095)), + }}, + } + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be less than or equal to 4094"))) + }) + }) + + Context("VMIDRange", func() { + It("Should only allow spec.vmIDRange.start 
>= 100", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + Start: 1, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be greater than or equal to 100"))) + }) + It("Should only allow spec.vmIDRange.end >= 100", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + End: 1, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be greater than or equal to 100"))) + }) + It("Should only allow spec.vmIDRange.end >= spec.vmIDRange.start", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + Start: 101, + End: 100, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be greater than or equal to start"))) + }) + It("Should only allow spec.vmIDRange.start if spec.vmIDRange.end is set", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + Start: 100, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("spec.vmIDRange.end: Required value"))) + }) + It("Should only allow spec.vmIDRange.end if spec.vmIDRange.start is set", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + End: 100, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("spec.vmIDRange.start: Required value"))) + }) + }) + + Context("Tags", func() { + It("should disallow invalid tags", func() { + dm := defaultMachine() + dm.Spec.Tags = []string{"foo=bar"} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Invalid value"))) + + dm.Spec.Tags = []string{"foo$bar"} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Invalid value"))) + + dm.Spec.Tags = []string{"foo^bar"} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Invalid value"))) + + dm.Spec.Tags = []string{"foo bar"} + 
Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Invalid value"))) + + dm.Spec.Tags = []string{"foo "} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Invalid value"))) + }) + + It("Should not allow duplicated tags", func() { + dm := defaultMachine() + dm.Spec.Tags = []string{"foo", "bar", "foo"} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Duplicate value"))) + dm.Spec.Tags = []string{"foo", "bar"} + Expect(k8sClient.Create(context.Background(), dm)).To(Succeed()) + }) + }) +}) diff --git a/api/v1alpha2/proxmoxmachinetemplate_types.go b/api/v1alpha2/proxmoxmachinetemplate_types.go new file mode 100644 index 00000000..7b607fb8 --- /dev/null +++ b/api/v1alpha2/proxmoxmachinetemplate_types.go @@ -0,0 +1,72 @@ +/* +Copyright 2023-2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// ProxmoxMachineTemplateSpec defines the desired state of ProxmoxMachineTemplate. +type ProxmoxMachineTemplateSpec struct { + // template is the Proxmox machine template resource. 
+ // +required + Template ProxmoxMachineTemplateResource `json:"template,omitzero"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=proxmoxmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=pmt +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// ProxmoxMachineTemplate is the Schema for the proxmoxmachinetemplates API. +type ProxmoxMachineTemplate struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the machine template spec. + // +required + Spec ProxmoxMachineTemplateSpec `json:"spec,omitzero"` +} + +// ProxmoxMachineTemplateResource defines the spec and metadata for ProxmoxMachineTemplate supported by capi. +type ProxmoxMachineTemplateResource struct { + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta *clusterv1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the Proxmox machine spec. + // +required + Spec *ProxmoxMachineSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// ProxmoxMachineTemplateList contains a list of ProxmoxMachineTemplate. +type ProxmoxMachineTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ProxmoxMachineTemplate `json:"items"` +} + +func init() { + objectTypes = append(objectTypes, &ProxmoxMachineTemplate{}, &ProxmoxMachineTemplateList{}) +} diff --git a/api/v1alpha2/suite_test.go b/api/v1alpha2/suite_test.go new file mode 100644 index 00000000..c7233366 --- /dev/null +++ b/api/v1alpha2/suite_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2023 IONOS Cloud. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + k8sClient client.Client + testEnv *envtest.Environment +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + sc, rc := GinkgoConfiguration() + sc.FailFast = true + rc.NoColor = true + RunSpecs(t, "v1alpha2 API Suite", sc, rc) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + }, + ErrorIfCRDPathMissing: true, + } + + scheme := runtime.NewScheme() + Expect(AddToScheme(scheme)).To(Succeed()) + + cfg, err := testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/api/v1alpha2/types.go b/api/v1alpha2/types.go new file mode 100644 index 
00000000..442cfd54 --- /dev/null +++ b/api/v1alpha2/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +// VirtualMachineState describes the state of a VM. +type VirtualMachineState string + +const ( + // VirtualMachineStateNotFound is the string representing a VM that + // cannot be located. + VirtualMachineStateNotFound VirtualMachineState = "notfound" + + // VirtualMachineStatePending is the string representing a VM with an in-flight task. + VirtualMachineStatePending VirtualMachineState = "pending" + + // VirtualMachineStateReady is the string representing a powered-on VM with reported IP addresses. + VirtualMachineStateReady VirtualMachineState = "ready" +) + +// VirtualMachine represents data about a Proxmox virtual machine object. +type VirtualMachine struct { + // node is the VM node. + // +required + // +kubebuilder:validation:MinLength=1 + Node string `json:"node,omitempty"` + + // name is the VM's name. + // +required + // +kubebuilder:validation:MinLength=1 + Name string `json:"name,omitempty"` + + // vmID is the VM's ID. + // +required + // +kubebuilder:validation:Minimum=100 + // +kubebuilder:validation:ExclusiveMinimum=false + VMID int64 `json:"vmID,omitempty"` + + // state is the VM's state. + // +required + // +kubebuilder:validation:Enum=notfound;pending;ready + State VirtualMachineState `json:"state,omitempty"` + + // network is the status of the VM's network devices. 
+ // +required + // +listType=atomic + Network []NetworkStatus `json:"network,omitempty"` +} + +// NetworkStatus provides information about one of a VM's networks. +type NetworkStatus struct { + // connected is a flag that indicates whether this network is currently + // connected to the VM. + // +required + Connected *bool `json:"connected,omitempty"` + + // ipAddrs is one or more IP addresses reported by vm-tools. + // +listType=set + // +optional + IPAddrs []string `json:"ipAddrs,omitempty"` + + // macAddr is the MAC address of the network device. + // +required + // +kubebuilder:validation:Pattern=`^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})$` + // +kubebuilder:validation:MinLength=17 + // +kubebuilder:validation:MaxLength=17 + MACAddr string `json:"macAddr,omitempty"` + + // networkName is the name of the network. + // +optional + NetworkName NetName `json:"networkName,omitempty"` +} + +// NetName is a formally verified Proxmox network name string. +// +kubebuilder:validation:Pattern=`^net[0-9]+$` +type NetName *string diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 00000000..33054f7a --- /dev/null +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,1236 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/errors" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSize) DeepCopyInto(out *DiskSize) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSize. 
+func (in *DiskSize) DeepCopy() *DiskSize { + if in == nil { + return nil + } + out := new(DiskSize) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddresses) DeepCopyInto(out *IPAddresses) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddresses. +func (in *IPAddresses) DeepCopy() *IPAddresses { + if in == nil { + return nil + } + out := new(IPAddresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigSpec) DeepCopyInto(out *IPConfigSpec) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigSpec. +func (in *IPConfigSpec) DeepCopy() *IPConfigSpec { + if in == nil { + return nil + } + out := new(IPConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InterfaceConfig) DeepCopyInto(out *InterfaceConfig) { + *out = *in + if in.IPPoolRef != nil { + in, out := &in.IPPoolRef, &out.IPPoolRef + *out = make([]v1.TypedLocalObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Routing.DeepCopyInto(&out.Routing) + if in.LinkMTU != nil { + in, out := &in.LinkMTU, &out.LinkMTU + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceConfig. +func (in *InterfaceConfig) DeepCopy() *InterfaceConfig { + if in == nil { + return nil + } + out := new(InterfaceConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataSettings) DeepCopyInto(out *MetadataSettings) { + *out = *in + if in.ProviderIDInjection != nil { + in, out := &in.ProviderIDInjection, &out.ProviderIDInjection + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataSettings. +func (in *MetadataSettings) DeepCopy() *MetadataSettings { + if in == nil { + return nil + } + out := new(MetadataSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDevice) DeepCopyInto(out *NetworkDevice) { + *out = *in + if in.Bridge != nil { + in, out := &in.Bridge, &out.Bridge + *out = new(string) + **out = **in + } + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(string) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(int32) + **out = **in + } + if in.VLAN != nil { + in, out := &in.VLAN, &out.VLAN + *out = new(int32) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + in.InterfaceConfig.DeepCopyInto(&out.InterfaceConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDevice. +func (in *NetworkDevice) DeepCopy() *NetworkDevice { + if in == nil { + return nil + } + out := new(NetworkDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.NetworkDevices != nil { + in, out := &in.NetworkDevices, &out.NetworkDevices + *out = make([]NetworkDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.VirtualNetworkDevices.DeepCopyInto(&out.VirtualNetworkDevices) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.Connected != nil { + in, out := &in.Connected, &out.Connected + *out = new(bool) + **out = **in + } + if in.IPAddrs != nil { + in, out := &in.IPAddrs, &out.IPAddrs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NetworkName != nil { + in, out := &in.NetworkName, &out.NetworkName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeLocation) DeepCopyInto(out *NodeLocation) { + *out = *in + out.Machine = in.Machine +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLocation. +func (in *NodeLocation) DeepCopy() *NodeLocation { + if in == nil { + return nil + } + out := new(NodeLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeLocations) DeepCopyInto(out *NodeLocations) { + *out = *in + if in.ControlPlane != nil { + in, out := &in.ControlPlane, &out.ControlPlane + *out = make([]NodeLocation, len(*in)) + copy(*out, *in) + } + if in.Workers != nil { + in, out := &in.Workers, &out.Workers + *out = make([]NodeLocation, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLocations. +func (in *NodeLocations) DeepCopy() *NodeLocations { + if in == nil { + return nil + } + out := new(NodeLocations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxmoxCluster) DeepCopyInto(out *ProxmoxCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxCluster. +func (in *ProxmoxCluster) DeepCopy() *ProxmoxCluster { + if in == nil { + return nil + } + out := new(ProxmoxCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterCloneSpec) DeepCopyInto(out *ProxmoxClusterCloneSpec) { + *out = *in + if in.ProxmoxMachineSpec != nil { + in, out := &in.ProxmoxMachineSpec, &out.ProxmoxMachineSpec + *out = make(map[string]ProxmoxMachineSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.SSHAuthorizedKeys != nil { + in, out := &in.SSHAuthorizedKeys, &out.SSHAuthorizedKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VirtualIPNetworkInterface != nil { + in, out := &in.VirtualIPNetworkInterface, &out.VirtualIPNetworkInterface + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterCloneSpec. +func (in *ProxmoxClusterCloneSpec) DeepCopy() *ProxmoxClusterCloneSpec { + if in == nil { + return nil + } + out := new(ProxmoxClusterCloneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxmoxClusterList) DeepCopyInto(out *ProxmoxClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxmoxCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterList. +func (in *ProxmoxClusterList) DeepCopy() *ProxmoxClusterList { + if in == nil { + return nil + } + out := new(ProxmoxClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterSpec) DeepCopyInto(out *ProxmoxClusterSpec) { + *out = *in + if in.ControlPlaneEndpoint != nil { + in, out := &in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint + *out = new(v1beta1.APIEndpoint) + **out = **in + } + if in.ExternalManagedControlPlane != nil { + in, out := &in.ExternalManagedControlPlane, &out.ExternalManagedControlPlane + *out = new(bool) + **out = **in + } + if in.AllowedNodes != nil { + in, out := &in.AllowedNodes, &out.AllowedNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SchedulerHints != nil { + in, out := &in.SchedulerHints, &out.SchedulerHints + *out = new(SchedulerHints) + (*in).DeepCopyInto(*out) + } + if in.IPv4Config != nil { + in, out := &in.IPv4Config, &out.IPv4Config + *out = new(IPConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.IPv6Config != nil { + in, out := &in.IPv6Config, &out.IPv6Config + *out = new(IPConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.CloneSpec != nil { + in, out := &in.CloneSpec, &out.CloneSpec + *out = new(ProxmoxClusterCloneSpec) + (*in).DeepCopyInto(*out) + } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterSpec. +func (in *ProxmoxClusterSpec) DeepCopy() *ProxmoxClusterSpec { + if in == nil { + return nil + } + out := new(ProxmoxClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterStatus) DeepCopyInto(out *ProxmoxClusterStatus) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + if in.InClusterIPPoolRef != nil { + in, out := &in.InClusterIPPoolRef, &out.InClusterIPPoolRef + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.NodeLocations != nil { + in, out := &in.NodeLocations, &out.NodeLocations + *out = new(NodeLocations) + (*in).DeepCopyInto(*out) + } + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(errors.ClusterStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterStatus. 
+func (in *ProxmoxClusterStatus) DeepCopy() *ProxmoxClusterStatus { + if in == nil { + return nil + } + out := new(ProxmoxClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterTemplate) DeepCopyInto(out *ProxmoxClusterTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterTemplate. +func (in *ProxmoxClusterTemplate) DeepCopy() *ProxmoxClusterTemplate { + if in == nil { + return nil + } + out := new(ProxmoxClusterTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxClusterTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterTemplateList) DeepCopyInto(out *ProxmoxClusterTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxmoxClusterTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterTemplateList. +func (in *ProxmoxClusterTemplateList) DeepCopy() *ProxmoxClusterTemplateList { + if in == nil { + return nil + } + out := new(ProxmoxClusterTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProxmoxClusterTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterTemplateResource) DeepCopyInto(out *ProxmoxClusterTemplateResource) { + *out = *in + if in.ObjectMeta != nil { + in, out := &in.ObjectMeta, &out.ObjectMeta + *out = new(v1beta1.ObjectMeta) + (*in).DeepCopyInto(*out) + } + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterTemplateResource. +func (in *ProxmoxClusterTemplateResource) DeepCopy() *ProxmoxClusterTemplateResource { + if in == nil { + return nil + } + out := new(ProxmoxClusterTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxClusterTemplateSpec) DeepCopyInto(out *ProxmoxClusterTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxClusterTemplateSpec. +func (in *ProxmoxClusterTemplateSpec) DeepCopy() *ProxmoxClusterTemplateSpec { + if in == nil { + return nil + } + out := new(ProxmoxClusterTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachine) DeepCopyInto(out *ProxmoxMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(ProxmoxMachineSpec) + (*in).DeepCopyInto(*out) + } + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachine. 
+func (in *ProxmoxMachine) DeepCopy() *ProxmoxMachine { + if in == nil { + return nil + } + out := new(ProxmoxMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachineChecks) DeepCopyInto(out *ProxmoxMachineChecks) { + *out = *in + if in.SkipCloudInitStatus != nil { + in, out := &in.SkipCloudInitStatus, &out.SkipCloudInitStatus + *out = new(bool) + **out = **in + } + if in.SkipQemuGuestAgent != nil { + in, out := &in.SkipQemuGuestAgent, &out.SkipQemuGuestAgent + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineChecks. +func (in *ProxmoxMachineChecks) DeepCopy() *ProxmoxMachineChecks { + if in == nil { + return nil + } + out := new(ProxmoxMachineChecks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachineList) DeepCopyInto(out *ProxmoxMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxmoxMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineList. 
+func (in *ProxmoxMachineList) DeepCopy() *ProxmoxMachineList { + if in == nil { + return nil + } + out := new(ProxmoxMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachineSpec) DeepCopyInto(out *ProxmoxMachineSpec) { + *out = *in + in.VirtualMachineCloneSpec.DeepCopyInto(&out.VirtualMachineCloneSpec) + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(int64) + **out = **in + } + if in.NumSockets != nil { + in, out := &in.NumSockets, &out.NumSockets + *out = new(int32) + **out = **in + } + if in.NumCores != nil { + in, out := &in.NumCores, &out.NumCores + *out = new(int32) + **out = **in + } + if in.MemoryMiB != nil { + in, out := &in.MemoryMiB, &out.MemoryMiB + *out = new(int32) + **out = **in + } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = new(Storage) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkSpec) + (*in).DeepCopyInto(*out) + } + if in.VMIDRange != nil { + in, out := &in.VMIDRange, &out.VMIDRange + *out = new(VMIDRange) + **out = **in + } + if in.Checks != nil { + in, out := &in.Checks, &out.Checks + *out = new(ProxmoxMachineChecks) + (*in).DeepCopyInto(*out) + } + if in.MetadataSettings != nil { + in, out := &in.MetadataSettings, &out.MetadataSettings + *out = new(MetadataSettings) + (*in).DeepCopyInto(*out) + } + if in.AllowedNodes != nil { + in, out := &in.AllowedNodes, &out.AllowedNodes + *out = make([]string, len(*in)) + 
copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineSpec. +func (in *ProxmoxMachineSpec) DeepCopy() *ProxmoxMachineSpec { + if in == nil { + return nil + } + out := new(ProxmoxMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachineStatus) DeepCopyInto(out *ProxmoxMachineStatus) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1beta1.MachineAddress, len(*in)) + copy(*out, *in) + } + if in.VMStatus != nil { + in, out := &in.VMStatus, &out.VMStatus + *out = new(VirtualMachineState) + **out = **in + } + if in.BootstrapDataProvided != nil { + in, out := &in.BootstrapDataProvided, &out.BootstrapDataProvided + *out = new(bool) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make(map[string]*IPAddresses, len(*in)) + for key, val := range *in { + var outVal *IPAddresses + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(IPAddresses) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = make([]NetworkStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProxmoxNode != nil { + in, out := &in.ProxmoxNode, &out.ProxmoxNode + *out = new(string) + **out = **in + } + if in.TaskRef != nil { + in, out := &in.TaskRef, &out.TaskRef + *out = new(string) + **out = **in + } + if in.RetryAfter != nil { + in, out := &in.RetryAfter, &out.RetryAfter + *out = (*in).DeepCopy() + } + if 
in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineStatus. +func (in *ProxmoxMachineStatus) DeepCopy() *ProxmoxMachineStatus { + if in == nil { + return nil + } + out := new(ProxmoxMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachineTemplate) DeepCopyInto(out *ProxmoxMachineTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineTemplate. +func (in *ProxmoxMachineTemplate) DeepCopy() *ProxmoxMachineTemplate { + if in == nil { + return nil + } + out := new(ProxmoxMachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxMachineTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxmoxMachineTemplateList) DeepCopyInto(out *ProxmoxMachineTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxmoxMachineTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineTemplateList. +func (in *ProxmoxMachineTemplateList) DeepCopy() *ProxmoxMachineTemplateList { + if in == nil { + return nil + } + out := new(ProxmoxMachineTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxmoxMachineTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxmoxMachineTemplateResource) DeepCopyInto(out *ProxmoxMachineTemplateResource) { + *out = *in + if in.ObjectMeta != nil { + in, out := &in.ObjectMeta, &out.ObjectMeta + *out = new(v1beta1.ObjectMeta) + (*in).DeepCopyInto(*out) + } + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(ProxmoxMachineSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineTemplateResource. +func (in *ProxmoxMachineTemplateResource) DeepCopy() *ProxmoxMachineTemplateResource { + if in == nil { + return nil + } + out := new(ProxmoxMachineTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxmoxMachineTemplateSpec) DeepCopyInto(out *ProxmoxMachineTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxmoxMachineTemplateSpec. +func (in *ProxmoxMachineTemplateSpec) DeepCopy() *ProxmoxMachineTemplateSpec { + if in == nil { + return nil + } + out := new(ProxmoxMachineTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(string) + **out = **in + } + if in.Via != nil { + in, out := &in.Via, &out.Via + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(int32) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. +func (in *RouteSpec) DeepCopy() *RouteSpec { + if in == nil { + return nil + } + out := new(RouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Routing) DeepCopyInto(out *Routing) { + *out = *in + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]RouteSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoutingPolicy != nil { + in, out := &in.RoutingPolicy, &out.RoutingPolicy + *out = make([]RoutingPolicySpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Routing. 
+func (in *Routing) DeepCopy() *Routing { + if in == nil { + return nil + } + out := new(Routing) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingPolicySpec) DeepCopyInto(out *RoutingPolicySpec) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(string) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(string) + **out = **in + } + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(int32) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicySpec. +func (in *RoutingPolicySpec) DeepCopy() *RoutingPolicySpec { + if in == nil { + return nil + } + out := new(RoutingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerHints) DeepCopyInto(out *SchedulerHints) { + *out = *in + if in.MemoryAdjustment != nil { + in, out := &in.MemoryAdjustment, &out.MemoryAdjustment + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerHints. +func (in *SchedulerHints) DeepCopy() *SchedulerHints { + if in == nil { + return nil + } + out := new(SchedulerHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + if in.BootVolume != nil { + in, out := &in.BootVolume, &out.BootVolume + *out = new(DiskSize) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. 
+func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateSelector) DeepCopyInto(out *TemplateSelector) { + *out = *in + if in.MatchTags != nil { + in, out := &in.MatchTags, &out.MatchTags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSelector. +func (in *TemplateSelector) DeepCopy() *TemplateSelector { + if in == nil { + return nil + } + out := new(TemplateSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateSource) DeepCopyInto(out *TemplateSource) { + *out = *in + if in.SourceNode != nil { + in, out := &in.SourceNode, &out.SourceNode + *out = new(string) + **out = **in + } + if in.TemplateID != nil { + in, out := &in.TemplateID, &out.TemplateID + *out = new(int32) + **out = **in + } + if in.TemplateSelector != nil { + in, out := &in.TemplateSelector, &out.TemplateSelector + *out = new(TemplateSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSource. +func (in *TemplateSource) DeepCopy() *TemplateSource { + if in == nil { + return nil + } + out := new(TemplateSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMIDRange) DeepCopyInto(out *VMIDRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMIDRange. 
+func (in *VMIDRange) DeepCopy() *VMIDRange { + if in == nil { + return nil + } + out := new(VMIDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VRFDevice) DeepCopyInto(out *VRFDevice) { + *out = *in + if in.Interfaces != nil { + in, out := &in.Interfaces, &out.Interfaces + *out = make([]NetName, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + in.Routing.DeepCopyInto(&out.Routing) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VRFDevice. +func (in *VRFDevice) DeepCopy() *VRFDevice { + if in == nil { + return nil + } + out := new(VRFDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachine) DeepCopyInto(out *VirtualMachine) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = make([]NetworkStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachine. +func (in *VirtualMachine) DeepCopy() *VirtualMachine { + if in == nil { + return nil + } + out := new(VirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineCloneSpec) DeepCopyInto(out *VirtualMachineCloneSpec) { + *out = *in + in.TemplateSource.DeepCopyInto(&out.TemplateSource) + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(TargetFileStorageFormat) + **out = **in + } + if in.Full != nil { + in, out := &in.Full, &out.Full + *out = new(bool) + **out = **in + } + if in.Pool != nil { + in, out := &in.Pool, &out.Pool + *out = new(string) + **out = **in + } + if in.SnapName != nil { + in, out := &in.SnapName, &out.SnapName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineCloneSpec. +func (in *VirtualMachineCloneSpec) DeepCopy() *VirtualMachineCloneSpec { + if in == nil { + return nil + } + out := new(VirtualMachineCloneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkDevices) DeepCopyInto(out *VirtualNetworkDevices) { + *out = *in + if in.VRFs != nil { + in, out := &in.VRFs, &out.VRFs + *out = make([]VRFDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkDevices. 
+func (in *VirtualNetworkDevices) DeepCopy() *VirtualNetworkDevices { + if in == nil { + return nil + } + out := new(VirtualNetworkDevices) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index c70afc07..6fc9691b 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -47,7 +48,7 @@ import ( // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" - infrastructurev1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/controller" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/tlshelper" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/webhook" @@ -82,7 +83,7 @@ var ( func init() { _ = clusterv1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - _ = infrastructurev1alpha1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) _ = ipamicv1.AddToScheme(scheme) _ = ipamv1.AddToScheme(scheme) @@ -151,6 +152,17 @@ func main() { os.Exit(1) } + // TODO: do I need this? 
+ cache := mgr.GetCache() + + indexFunc := func(obj client.Object) []string { + return []string{obj.(*ipamv1.IPAddress).Spec.PoolRef.Name} + } + + if err = cache.IndexField(ctx, &ipamv1.IPAddress{}, "spec.poolRef.name", indexFunc); err != nil { + panic(err) + } + if enableWebhooks { if err = (&webhook.ProxmoxCluster{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ProxmoxCluster") diff --git a/cmd/main_test.go b/cmd/main_test.go index a689ee0f..7cd6a6b7 100644 --- a/cmd/main_test.go +++ b/cmd/main_test.go @@ -19,7 +19,7 @@ import ( ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" - infrastructurev1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest" ) @@ -29,7 +29,7 @@ func TestSetupReconcilers(t *testing.T) { s := runtime.NewScheme() require.NoError(t, clientgoscheme.AddToScheme(s)) require.NoError(t, clusterv1.AddToScheme(s)) - require.NoError(t, infrastructurev1alpha1.AddToScheme(s)) + require.NoError(t, infrav1.AddToScheme(s)) require.NoError(t, ipamicv1.AddToScheme(s)) require.NoError(t, ipamv1.AddToScheme(s)) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 35973a96..ac604fc2 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -1035,6 +1035,928 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Cluster + jsonPath: .metadata.labels['cluster\.x-k8s\.io/cluster-name'] + name: Cluster + type: string + - description: Cluster infrastructure is ready + jsonPath: .status.ready + name: Ready 
+ type: string + - description: API Endpoint + jsonPath: .spec.controlPlaneEndpoint + name: Endpoint + type: string + name: v1alpha2 + schema: + openAPIV3Schema: + description: ProxmoxCluster is the Schema for the proxmoxclusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the Proxmox Cluster spec + properties: + allowedNodes: + description: |- + allowedNodes specifies all Proxmox nodes which will be considered + for operations. This implies that VMs can be cloned on different nodes from + the node which holds the VM template. + items: + type: string + type: array + x-kubernetes-list-type: set + cloneSpec: + description: |- + cloneSpec is the configuration pertaining to all items configurable + in the configuration and cloning of a proxmox VM. Multiple types of nodes can be specified. + properties: + machineSpec: + additionalProperties: + description: ProxmoxMachineSpec defines the desired state of + a ProxmoxMachine. + properties: + allowedNodes: + description: |- + allowedNodes specifies all Proxmox nodes which will be considered + for operations. This implies that VMs can be cloned on different nodes from + the node which holds the VM template. + + This field is optional and should only be set if you want to restrict + the nodes where the VM can be cloned. 
+ If not set, the ProxmoxCluster will be used to determine the nodes. + items: + type: string + type: array + x-kubernetes-list-type: set + checks: + description: checks defines possibles checks to skip. + properties: + skipCloudInitStatus: + description: skipCloudInitStatus skip checking CloudInit + status which can be useful with specific Operating + Systems like TalOS + type: boolean + skipQemuGuestAgent: + description: skipQemuGuestAgent skips checking QEMU + Agent readiness which can be useful with specific + Operating Systems like TalOS + type: boolean + type: object + description: + description: description for the new VM. + type: string + disks: + description: |- + disks contains a set of disk configuration options, + which will be applied before the first startup. + properties: + bootVolume: + description: |- + bootVolume defines the storage size for the boot volume. + This field is optional, and should only be set if you want + to change the size of the boot volume. + properties: + disk: + description: |- + disk is the name of the disk device that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + minLength: 1 + type: string + sizeGb: + description: |- + sizeGb defines the size in gigabytes. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + type: object + format: + description: format for file storage. Only valid for full + clone. + enum: + - raw + - qcow2 + - vmdk + type: string + full: + default: true + description: |- + full Create a full copy of all disks. + This is always done when you clone a normal VM. + Create a Full clone by default. + type: boolean + memoryMiB: + description: |- + memoryMiB is the size of a virtual machine's memory, in MiB. 
+ Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + multipleOf: 8 + type: integer + metadataSettings: + description: metadataSettings defines the metadata settings + for this machine's VM. + properties: + providerIDInjection: + description: |- + providerIDInjection enables the injection of the `providerID` into the cloudinit metadata. + this will basically set the `provider-id` field in the metadata to `proxmox://`. + type: boolean + required: + - providerIDInjection + type: object + network: + description: network is the network configuration for this + machine's VM. + properties: + networkDevices: + description: |- + networkDevices lists network devices. + net0 is always the default device. + items: + description: NetworkDevice defines the required details + of a virtual machine network device. + properties: + bridge: + description: bridge is the network bridge to attach + to the machine. + minLength: 1 + type: string + dnsServers: + description: |- + dnsServers contains information about nameservers to be used for this interface. + If this field is not set, it will use the default dns servers from the ProxmoxCluster. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + ipPoolRef: + description: |- + ipPoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses. + The network device will use an available IP address from the referenced pool. + This can be combined with `IPv6PoolRef` in order to enable dual stack. + items: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipPoolRef allows only IPAM apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipPoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + type: array + x-kubernetes-list-type: atomic + linkMtu: + description: linkMtu is the network device Maximum + Transmission Unit. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= + 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= + 65520) + model: + default: virtio + description: model is the network device model. + enum: + - e1000 + - virtio + - rtl8139 + - vmxnet3 + type: string + mtu: + description: |- + mtu is the network device Maximum Transmission Unit. + When set to 1, virtio devices inherit the MTU value from the underlying bridge. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= + 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= + 65520) + name: + default: net0 + description: name is the network device name. + pattern: ^net[0-9]+$ + type: string + routes: + description: routes are the routes associated + with this interface. + items: + description: RouteSpec describes an IPv4/IPv6 + Route. + properties: + metric: + description: metric is the priority of the + route in the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing table + used for this route. 
+ format: int32 + type: integer + to: + description: to is the subnet to be routed. + type: string + via: + description: via is the gateway to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific + policy inserted into FIB (forwarding information + base). + items: + description: RoutingPolicySpec is a Linux FIB + rule. + properties: + from: + description: from is the subnet of the source. + type: string + priority: + description: priority is the position in + the ip rule FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert FIB + rule matching kernel rules + rule: (self > 0 && self < 32765) || (self + > 32766) + table: + description: table is the routing table + ID. + format: int32 + type: integer + to: + description: to is the subnet of the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + vlan: + description: vlan is the network L2 VLAN. + format: int32 + maximum: 4094 + minimum: 1 + type: integer + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vrfs: + description: vrfs defines VRF Devices. + items: + description: VRFDevice defines Virtual Routing Flow + devices. + properties: + interfaces: + description: interfaces is the list of proxmox + network devices managed by this virtual device. + items: + description: NetName is a formally verified + Proxmox network name string. + pattern: ^net[0-9]+$ + type: string + type: array + x-kubernetes-list-type: set + name: + description: |- + name is the virtual network device name. + Must be unique within the virtual machine. + minLength: 3 + type: string + routes: + description: routes are the routes associated + with this interface. + items: + description: RouteSpec describes an IPv4/IPv6 + Route. 
+ properties: + metric: + description: metric is the priority of the + route in the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing table + used for this route. + format: int32 + type: integer + to: + description: to is the subnet to be routed. + type: string + via: + description: via is the gateway to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific + policy inserted into FIB (forwarding information + base). + items: + description: RoutingPolicySpec is a Linux FIB + rule. + properties: + from: + description: from is the subnet of the source. + type: string + priority: + description: priority is the position in + the ip rule FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert FIB + rule matching kernel rules + rule: (self > 0 && self < 32765) || (self + > 32766) + table: + description: table is the routing table + ID. + format: int32 + type: integer + to: + description: to is the subnet of the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + table: + description: table is the ID of the routing table + used for the l3mdev vrf device. + format: int32 + maximum: 4294967295 + minimum: 1 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert l3mdev + rules into kernel tables + rule: (self > 0 && self < 254) || (self > 255) + required: + - name + - table + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + numCores: + description: |- + numCores is the number of cores per CPU socket in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. 
+ format: int32 + minimum: 1 + type: integer + numSockets: + description: |- + numSockets is the number of CPU sockets in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + minimum: 1 + type: integer + pool: + description: pool Add the new VM to the specified pool. + type: string + providerID: + description: |- + providerID is the virtual machine BIOS UUID formatted as + proxmox://6c3fa683-bef9-4425-b413-eaa45a9d6191 + type: string + snapName: + description: snapName The name of the snapshot. + type: string + sourceNode: + description: |- + sourceNode is the initially selected proxmox node. + This node will be used to locate the template VM, which will + be used for cloning operations. + + Cloning will be performed according to the configuration. + Setting the `Target` field will tell Proxmox to clone the + VM on that target node. + + When Target is not set and the ProxmoxCluster contains + a set of `AllowedNodes`, the algorithm will instead evenly + distribute the VMs across the nodes from that list. + + If neither a `Target` nor `AllowedNodes` was set, the VM + will be cloned onto the same node as SourceNode. + minLength: 1 + type: string + storage: + description: storage for full clone. + type: string + tags: + description: tags is a list of tags to be applied to the + virtual machine. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + target: + description: target node. Only allowed if the original VM + is on shared storage. + type: string + templateID: + description: templateID the vm_template vmid used for cloning + a new VM. + format: int32 + type: integer + templateSelector: + description: templateSelector defines MatchTags for looking + up VM templates. + properties: + matchTags: + description: |- + matchTags specifies all tags to look for when looking up the VM template. 
+ Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + required: + - matchTags + type: object + virtualMachineID: + description: virtualMachineID is the Proxmox identifier + for the ProxmoxMachine VM. + format: int64 + type: integer + vmIDRange: + description: vmIDRange is the range of VMIDs to use for + VMs. + properties: + end: + description: |- + end is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: start is the start of the VMID range to + use for VMs. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to start + rule: self.end >= self.start + type: object + x-kubernetes-validations: + - message: Must set full=true when specifying format + rule: self.full || !has(self.format) + - message: Must set full=true when specifying storage + rule: self.full || !has(self.storage) + description: machineSpec is the map of machine specs + type: object + x-kubernetes-validations: + - message: Cowardly refusing to deploy cluster without control + plane + rule: has(self.controlPlane) + sshAuthorizedKeys: + description: sshAuthorizedKeys contains the authorized keys deployed + to the PROXMOX VMs. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualIPNetworkInterface: + description: virtualIPNetworkInterface is the interface the k8s + control plane binds to. + type: string + required: + - machineSpec + type: object + controlPlaneEndpoint: + description: controlPlaneEndpoint represents the endpoint used to + communicate with the control plane. 
+ properties: + host: + description: host is the hostname on which the API server is serving. + maxLength: 512 + type: string + port: + description: port is the port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + x-kubernetes-validations: + - message: port must be within 1-65535 + rule: self.port > 0 && self.port < 65536 + credentialsRef: + description: |- + credentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not + supplied then the credentials of the controller will be used. + if no namespace is provided, the namespace of the ProxmoxCluster will be used. + properties: + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + dnsServers: + description: dnsServers contains information about nameservers used + by the machines. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + externalManagedControlPlane: + default: false + description: |- + externalManagedControlPlane can be enabled to allow externally managed Control Planes to patch the + Proxmox cluster with the Load Balancer IP provided by Control Plane provider. + type: boolean + ipv4Config: + description: |- + ipv4Config contains information about available IPv4 address pools and the gateway. + This can be combined with ipv6Config in order to enable dual stack. + Either IPv4Config or IPv6Config must be provided. + properties: + addresses: + description: addresses is a list of IP addresses that can be assigned. + This set of addresses can be non-contiguous. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + gateway: + description: gateway is the network gateway + minLength: 1 + type: string + metric: + default: 100 + description: metric is the route priority applied to the default + gateway + format: int32 + minimum: 0 + type: integer + prefix: + description: prefix is the network prefix to use. + format: int32 + maximum: 128 + minimum: 1 + type: integer + required: + - addresses + - gateway + - metric + - prefix + type: object + x-kubernetes-validations: + - message: IPv4Config addresses must be provided + rule: self.addresses.size() > 0 + ipv6Config: + description: |- + ipv6Config contains information about available IPv6 address pools and the gateway. + This can be combined with ipv4Config in order to enable dual stack. + Either IPv4Config or IPv6Config must be provided. + properties: + addresses: + description: addresses is a list of IP addresses that can be assigned. + This set of addresses can be non-contiguous. + items: + type: string + type: array + x-kubernetes-list-type: set + gateway: + description: gateway is the network gateway + minLength: 1 + type: string + metric: + default: 100 + description: metric is the route priority applied to the default + gateway + format: int32 + minimum: 0 + type: integer + prefix: + description: prefix is the network prefix to use. + format: int32 + maximum: 128 + minimum: 1 + type: integer + required: + - addresses + - gateway + - metric + - prefix + type: object + x-kubernetes-validations: + - message: IPv6Config addresses must be provided + rule: self.addresses.size() > 0 + schedulerHints: + description: |- + schedulerHints allows to influence the decision on where a VM will be scheduled. For example by applying a multiplicator + to a node's resources, to allow for overprovisioning or to ensure a node will always have a safety buffer. 
+ properties: + memoryAdjustment: + description: |- + memoryAdjustment allows to adjust a node's memory by a given percentage. + For example, setting it to 300 allows to allocate 300% of a host's memory for VMs, + and setting it to 95 limits memory allocation to 95% of a host's memory. + Setting it to 0 entirely disables scheduling memory constraints. + By default 100% of a node's memory will be used for allocation. + format: int64 + minimum: 0 + type: integer + type: object + required: + - dnsServers + type: object + x-kubernetes-validations: + - message: at least one ip config must be set, either ipv4Config or ipv6Config + rule: self.ipv4Config != null || self.ipv6Config != null + status: + description: status is the Proxmox Cluster status + properties: + conditions: + description: conditions defines current service state of the ProxmoxCluster. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. 
+ maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failureMessage: + description: |- + failureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of ProxmoxMachines + can be added as events to the ProxmoxCluster object and/or logged in the + controller's output. + type: string + failureReason: + description: |- + failureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. 
+ + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of ProxmoxCluster + can be added as events to the ProxmoxCluster object and/or logged in the + controller's output. + type: string + inClusterIpPoolRef: + description: inClusterIpPoolRef is the reference to the created in-cluster + IP pool. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + nodeLocations: + description: |- + nodeLocations keeps track of which nodes have been selected + for different machines. + properties: + controlPlane: + description: controlPlane contains all deployed control plane + nodes. + items: + description: |- + NodeLocation holds information about a single VM + in Proxmox. + properties: + machine: + description: machine is the reference to the ProxmoxMachine + that the node is on. + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + node: + description: node is the Proxmox node. + minLength: 1 + type: string + required: + - machine + - node + type: object + type: array + x-kubernetes-list-type: atomic + workers: + description: workers contains all deployed worker nodes. + items: + description: |- + NodeLocation holds information about a single VM + in Proxmox. + properties: + machine: + description: machine is the reference to the ProxmoxMachine + that the node is on. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + node: + description: node is the Proxmox node. + minLength: 1 + type: string + required: + - machine + - node + type: object + type: array + x-kubernetes-list-type: atomic + type: object + ready: + default: false + description: ready indicates that the cluster is ready. 
+ type: boolean + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 7efef5ec..ba47ba30 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -898,6 +898,778 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - name: v1alpha2 + schema: + openAPIV3Schema: + description: ProxmoxClusterTemplate is the Schema for the proxmoxclustertemplates + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the Proxmox Cluster Template spec + properties: + template: + description: template is the Proxmox Cluster template + properties: + metadata: + description: |- + metadata is the standard object metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: spec is the Proxmox Cluster spec + properties: + allowedNodes: + description: |- + allowedNodes specifies all Proxmox nodes which will be considered + for operations. This implies that VMs can be cloned on different nodes from + the node which holds the VM template. + items: + type: string + type: array + x-kubernetes-list-type: set + cloneSpec: + description: |- + cloneSpec is the configuration pertaining to all items configurable + in the configuration and cloning of a proxmox VM. Multiple types of nodes can be specified. + properties: + machineSpec: + additionalProperties: + description: ProxmoxMachineSpec defines the desired + state of a ProxmoxMachine. + properties: + allowedNodes: + description: |- + allowedNodes specifies all Proxmox nodes which will be considered + for operations. This implies that VMs can be cloned on different nodes from + the node which holds the VM template. + + This field is optional and should only be set if you want to restrict + the nodes where the VM can be cloned. + If not set, the ProxmoxCluster will be used to determine the nodes. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + checks: + description: checks defines possibles checks to + skip. + properties: + skipCloudInitStatus: + description: skipCloudInitStatus skip checking + CloudInit status which can be useful with + specific Operating Systems like TalOS + type: boolean + skipQemuGuestAgent: + description: skipQemuGuestAgent skips checking + QEMU Agent readiness which can be useful with + specific Operating Systems like TalOS + type: boolean + type: object + description: + description: description for the new VM. + type: string + disks: + description: |- + disks contains a set of disk configuration options, + which will be applied before the first startup. + properties: + bootVolume: + description: |- + bootVolume defines the storage size for the boot volume. + This field is optional, and should only be set if you want + to change the size of the boot volume. + properties: + disk: + description: |- + disk is the name of the disk device that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + minLength: 1 + type: string + sizeGb: + description: |- + sizeGb defines the size in gigabytes. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + type: object + format: + description: format for file storage. Only valid + for full clone. + enum: + - raw + - qcow2 + - vmdk + type: string + full: + default: true + description: |- + full Create a full copy of all disks. + This is always done when you clone a normal VM. + Create a Full clone by default. + type: boolean + memoryMiB: + description: |- + memoryMiB is the size of a virtual machine's memory, in MiB. + Defaults to the property value in the template from which the virtual machine is cloned. 
+ format: int32 + multipleOf: 8 + type: integer + metadataSettings: + description: metadataSettings defines the metadata + settings for this machine's VM. + properties: + providerIDInjection: + description: |- + providerIDInjection enables the injection of the `providerID` into the cloudinit metadata. + this will basically set the `provider-id` field in the metadata to `proxmox://`. + type: boolean + required: + - providerIDInjection + type: object + network: + description: network is the network configuration + for this machine's VM. + properties: + networkDevices: + description: |- + networkDevices lists network devices. + net0 is always the default device. + items: + description: NetworkDevice defines the required + details of a virtual machine network device. + properties: + bridge: + description: bridge is the network bridge + to attach to the machine. + minLength: 1 + type: string + dnsServers: + description: |- + dnsServers contains information about nameservers to be used for this interface. + If this field is not set, it will use the default dns servers from the ProxmoxCluster. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + ipPoolRef: + description: |- + ipPoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses. + The network device will use an available IP address from the referenced pool. + This can be combined with `IPv6PoolRef` in order to enable dual stack. + items: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of + resource being referenced + type: string + name: + description: Name is the name of + resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipPoolRef allows only IPAM + apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipPoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' + || self.kind == 'GlobalInClusterIPPool' + type: array + x-kubernetes-list-type: atomic + linkMtu: + description: linkMtu is the network device + Maximum Transmission Unit. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && + self <= 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && + self <= 65520) + model: + default: virtio + description: model is the network device + model. + enum: + - e1000 + - virtio + - rtl8139 + - vmxnet3 + type: string + mtu: + description: |- + mtu is the network device Maximum Transmission Unit. + When set to 1, virtio devices inherit the MTU value from the underlying bridge. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && + self <= 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && + self <= 65520) + name: + default: net0 + description: name is the network device + name. + pattern: ^net[0-9]+$ + type: string + routes: + description: routes are the routes associated + with this interface. + items: + description: RouteSpec describes an + IPv4/IPv6 Route. + properties: + metric: + description: metric is the priority + of the route in the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing + table used for this route. 
+ format: int32 + type: integer + to: + description: to is the subnet to + be routed. + type: string + via: + description: via is the gateway + to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific + policy inserted into FIB (forwarding + information base). + items: + description: RoutingPolicySpec is a + Linux FIB rule. + properties: + from: + description: from is the subnet + of the source. + type: string + priority: + description: priority is the position + in the ip rule FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to + insert FIB rule matching kernel + rules + rule: (self > 0 && self < 32765) + || (self > 32766) + table: + description: table is the routing + table ID. + format: int32 + type: integer + to: + description: to is the subnet of + the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + vlan: + description: vlan is the network L2 VLAN. + format: int32 + maximum: 4094 + minimum: 1 + type: integer + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vrfs: + description: vrfs defines VRF Devices. + items: + description: VRFDevice defines Virtual Routing + Flow devices. + properties: + interfaces: + description: interfaces is the list of + proxmox network devices managed by this + virtual device. + items: + description: NetName is a formally verified + Proxmox network name string. + pattern: ^net[0-9]+$ + type: string + type: array + x-kubernetes-list-type: set + name: + description: |- + name is the virtual network device name. + Must be unique within the virtual machine. + minLength: 3 + type: string + routes: + description: routes are the routes associated + with this interface. + items: + description: RouteSpec describes an + IPv4/IPv6 Route. 
+ properties: + metric: + description: metric is the priority + of the route in the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing + table used for this route. + format: int32 + type: integer + to: + description: to is the subnet to + be routed. + type: string + via: + description: via is the gateway + to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific + policy inserted into FIB (forwarding + information base). + items: + description: RoutingPolicySpec is a + Linux FIB rule. + properties: + from: + description: from is the subnet + of the source. + type: string + priority: + description: priority is the position + in the ip rule FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to + insert FIB rule matching kernel + rules + rule: (self > 0 && self < 32765) + || (self > 32766) + table: + description: table is the routing + table ID. + format: int32 + type: integer + to: + description: to is the subnet of + the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + table: + description: table is the ID of the routing + table used for the l3mdev vrf device. + format: int32 + maximum: 4294967295 + minimum: 1 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert + l3mdev rules into kernel tables + rule: (self > 0 && self < 254) || (self + > 255) + required: + - name + - table + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + numCores: + description: |- + numCores is the number of cores per CPU socket in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. 
+ format: int32 + minimum: 1 + type: integer + numSockets: + description: |- + numSockets is the number of CPU sockets in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + minimum: 1 + type: integer + pool: + description: pool Add the new VM to the specified + pool. + type: string + providerID: + description: |- + providerID is the virtual machine BIOS UUID formatted as + proxmox://6c3fa683-bef9-4425-b413-eaa45a9d6191 + type: string + snapName: + description: snapName The name of the snapshot. + type: string + sourceNode: + description: |- + sourceNode is the initially selected proxmox node. + This node will be used to locate the template VM, which will + be used for cloning operations. + + Cloning will be performed according to the configuration. + Setting the `Target` field will tell Proxmox to clone the + VM on that target node. + + When Target is not set and the ProxmoxCluster contains + a set of `AllowedNodes`, the algorithm will instead evenly + distribute the VMs across the nodes from that list. + + If neither a `Target` nor `AllowedNodes` was set, the VM + will be cloned onto the same node as SourceNode. + minLength: 1 + type: string + storage: + description: storage for full clone. + type: string + tags: + description: tags is a list of tags to be applied + to the virtual machine. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + target: + description: target node. Only allowed if the original + VM is on shared storage. + type: string + templateID: + description: templateID the vm_template vmid used + for cloning a new VM. + format: int32 + type: integer + templateSelector: + description: templateSelector defines MatchTags + for looking up VM templates. + properties: + matchTags: + description: |- + matchTags specifies all tags to look for when looking up the VM template. 
+ Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + required: + - matchTags + type: object + virtualMachineID: + description: virtualMachineID is the Proxmox identifier + for the ProxmoxMachine VM. + format: int64 + type: integer + vmIDRange: + description: vmIDRange is the range of VMIDs to + use for VMs. + properties: + end: + description: |- + end is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: start is the start of the VMID + range to use for VMs. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to + start + rule: self.end >= self.start + type: object + x-kubernetes-validations: + - message: Must set full=true when specifying format + rule: self.full || !has(self.format) + - message: Must set full=true when specifying storage + rule: self.full || !has(self.storage) + description: machineSpec is the map of machine specs + type: object + x-kubernetes-validations: + - message: Cowardly refusing to deploy cluster without + control plane + rule: has(self.controlPlane) + sshAuthorizedKeys: + description: sshAuthorizedKeys contains the authorized + keys deployed to the PROXMOX VMs. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualIPNetworkInterface: + description: virtualIPNetworkInterface is the interface + the k8s control plane binds to. + type: string + required: + - machineSpec + type: object + controlPlaneEndpoint: + description: controlPlaneEndpoint represents the endpoint + used to communicate with the control plane. 
+ properties: + host: + description: host is the hostname on which the API server + is serving. + maxLength: 512 + type: string + port: + description: port is the port on which the API server + is serving. + format: int32 + type: integer + required: + - host + - port + type: object + x-kubernetes-validations: + - message: port must be within 1-65535 + rule: self.port > 0 && self.port < 65536 + credentialsRef: + description: |- + credentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not + supplied then the credentials of the controller will be used. + if no namespace is provided, the namespace of the ProxmoxCluster will be used. + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + dnsServers: + description: dnsServers contains information about nameservers + used by the machines. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + externalManagedControlPlane: + default: false + description: |- + externalManagedControlPlane can be enabled to allow externally managed Control Planes to patch the + Proxmox cluster with the Load Balancer IP provided by Control Plane provider. + type: boolean + ipv4Config: + description: |- + ipv4Config contains information about available IPv4 address pools and the gateway. + This can be combined with ipv6Config in order to enable dual stack. + Either IPv4Config or IPv6Config must be provided. + properties: + addresses: + description: addresses is a list of IP addresses that + can be assigned. This set of addresses can be non-contiguous. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + gateway: + description: gateway is the network gateway + minLength: 1 + type: string + metric: + default: 100 + description: metric is the route priority applied to the + default gateway + format: int32 + minimum: 0 + type: integer + prefix: + description: prefix is the network prefix to use. + format: int32 + maximum: 128 + minimum: 1 + type: integer + required: + - addresses + - gateway + - metric + - prefix + type: object + x-kubernetes-validations: + - message: IPv4Config addresses must be provided + rule: self.addresses.size() > 0 + ipv6Config: + description: |- + ipv6Config contains information about available IPv6 address pools and the gateway. + This can be combined with ipv4Config in order to enable dual stack. + Either IPv4Config or IPv6Config must be provided. + properties: + addresses: + description: addresses is a list of IP addresses that + can be assigned. This set of addresses can be non-contiguous. + items: + type: string + type: array + x-kubernetes-list-type: set + gateway: + description: gateway is the network gateway + minLength: 1 + type: string + metric: + default: 100 + description: metric is the route priority applied to the + default gateway + format: int32 + minimum: 0 + type: integer + prefix: + description: prefix is the network prefix to use. + format: int32 + maximum: 128 + minimum: 1 + type: integer + required: + - addresses + - gateway + - metric + - prefix + type: object + x-kubernetes-validations: + - message: IPv6Config addresses must be provided + rule: self.addresses.size() > 0 + schedulerHints: + description: |- + schedulerHints allows to influence the decision on where a VM will be scheduled. For example by applying a multiplicator + to a node's resources, to allow for overprovisioning or to ensure a node will always have a safety buffer. 
+ properties: + memoryAdjustment: + description: |- + memoryAdjustment allows to adjust a node's memory by a given percentage. + For example, setting it to 300 allows to allocate 300% of a host's memory for VMs, + and setting it to 95 limits memory allocation to 95% of a host's memory. + Setting it to 0 entirely disables scheduling memory constraints. + By default 100% of a node's memory will be used for allocation. + format: int64 + minimum: 0 + type: integer + type: object + required: + - dnsServers + type: object + required: + - spec + type: object + required: + - template + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 58861b3e..c01e6066 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -852,6 +852,745 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Cluster to which this ProxmoxMachine belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Machine ready status + jsonPath: .status.ready + name: Ready + type: string + - description: Proxmox Node that the machine was deployed on + jsonPath: .status.proxmoxNode + name: Node + type: string + - description: Provider ID + jsonPath: .spec.providerID + name: Provider_ID + type: string + - description: Machine object which owns with this ProxmoxMachine + jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name + name: Machine + type: string + name: v1alpha2 + schema: + openAPIV3Schema: + description: ProxmoxMachine is the Schema for the proxmoxmachines API. 
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: spec is the Proxmox machine spec.
+            properties:
+              allowedNodes:
+                description: |-
+                  allowedNodes specifies all Proxmox nodes which will be considered
+                  for operations. This implies that VMs can be cloned on different nodes from
+                  the node which holds the VM template.
+
+                  This field is optional and should only be set if you want to restrict
+                  the nodes where the VM can be cloned.
+                  If not set, the ProxmoxCluster will be used to determine the nodes.
+                items:
+                  type: string
+                type: array
+                x-kubernetes-list-type: set
+              checks:
+                description: checks defines possible checks to skip.
+                properties:
+                  skipCloudInitStatus:
+                    description: skipCloudInitStatus skips checking CloudInit status
+                      which can be useful with specific Operating Systems like Talos
+                    type: boolean
+                  skipQemuGuestAgent:
+                    description: skipQemuGuestAgent skips checking QEMU Agent readiness
+                      which can be useful with specific Operating Systems like Talos
+                    type: boolean
+                type: object
+              description:
+                description: description for the new VM.
+                type: string
+              disks:
+                description: |-
+                  disks contains a set of disk configuration options,
+                  which will be applied before the first startup.
+ properties: + bootVolume: + description: |- + bootVolume defines the storage size for the boot volume. + This field is optional, and should only be set if you want + to change the size of the boot volume. + properties: + disk: + description: |- + disk is the name of the disk device that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + minLength: 1 + type: string + sizeGb: + description: |- + sizeGb defines the size in gigabytes. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + type: object + format: + description: format for file storage. Only valid for full clone. + enum: + - raw + - qcow2 + - vmdk + type: string + full: + default: true + description: |- + full Create a full copy of all disks. + This is always done when you clone a normal VM. + Create a Full clone by default. + type: boolean + memoryMiB: + description: |- + memoryMiB is the size of a virtual machine's memory, in MiB. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + multipleOf: 8 + type: integer + metadataSettings: + description: metadataSettings defines the metadata settings for this + machine's VM. + properties: + providerIDInjection: + description: |- + providerIDInjection enables the injection of the `providerID` into the cloudinit metadata. + this will basically set the `provider-id` field in the metadata to `proxmox://`. + type: boolean + required: + - providerIDInjection + type: object + network: + description: network is the network configuration for this machine's + VM. + properties: + networkDevices: + description: |- + networkDevices lists network devices. + net0 is always the default device. 
+ items: + description: NetworkDevice defines the required details of a + virtual machine network device. + properties: + bridge: + description: bridge is the network bridge to attach to the + machine. + minLength: 1 + type: string + dnsServers: + description: |- + dnsServers contains information about nameservers to be used for this interface. + If this field is not set, it will use the default dns servers from the ProxmoxCluster. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + ipPoolRef: + description: |- + ipPoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses. + The network device will use an available IP address from the referenced pool. + This can be combined with `IPv6PoolRef` in order to enable dual stack. + items: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipPoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipPoolRef allows either InClusterIPPool or + GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == + 'GlobalInClusterIPPool' + type: array + x-kubernetes-list-type: atomic + linkMtu: + description: linkMtu is the network device Maximum Transmission + Unit. 
+ format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + model: + default: virtio + description: model is the network device model. + enum: + - e1000 + - virtio + - rtl8139 + - vmxnet3 + type: string + mtu: + description: |- + mtu is the network device Maximum Transmission Unit. + When set to 1, virtio devices inherit the MTU value from the underlying bridge. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + name: + default: net0 + description: name is the network device name. + pattern: ^net[0-9]+$ + type: string + routes: + description: routes are the routes associated with this + interface. + items: + description: RouteSpec describes an IPv4/IPv6 Route. + properties: + metric: + description: metric is the priority of the route in + the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing table used for this + route. + format: int32 + type: integer + to: + description: to is the subnet to be routed. + type: string + via: + description: via is the gateway to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific policy + inserted into FIB (forwarding information base). + items: + description: RoutingPolicySpec is a Linux FIB rule. + properties: + from: + description: from is the subnet of the source. + type: string + priority: + description: priority is the position in the ip rule + FIB table. 
+ format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert FIB rule matching + kernel rules + rule: (self > 0 && self < 32765) || (self > 32766) + table: + description: table is the routing table ID. + format: int32 + type: integer + to: + description: to is the subnet of the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + vlan: + description: vlan is the network L2 VLAN. + format: int32 + maximum: 4094 + minimum: 1 + type: integer + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vrfs: + description: vrfs defines VRF Devices. + items: + description: VRFDevice defines Virtual Routing Flow devices. + properties: + interfaces: + description: interfaces is the list of proxmox network devices + managed by this virtual device. + items: + description: NetName is a formally verified Proxmox network + name string. + pattern: ^net[0-9]+$ + type: string + type: array + x-kubernetes-list-type: set + name: + description: |- + name is the virtual network device name. + Must be unique within the virtual machine. + minLength: 3 + type: string + routes: + description: routes are the routes associated with this + interface. + items: + description: RouteSpec describes an IPv4/IPv6 Route. + properties: + metric: + description: metric is the priority of the route in + the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing table used for this + route. + format: int32 + type: integer + to: + description: to is the subnet to be routed. + type: string + via: + description: via is the gateway to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific policy + inserted into FIB (forwarding information base). 
+ items: + description: RoutingPolicySpec is a Linux FIB rule. + properties: + from: + description: from is the subnet of the source. + type: string + priority: + description: priority is the position in the ip rule + FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert FIB rule matching + kernel rules + rule: (self > 0 && self < 32765) || (self > 32766) + table: + description: table is the routing table ID. + format: int32 + type: integer + to: + description: to is the subnet of the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + table: + description: table is the ID of the routing table used for + the l3mdev vrf device. + format: int32 + maximum: 4294967295 + minimum: 1 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert l3mdev rules into + kernel tables + rule: (self > 0 && self < 254) || (self > 255) + required: + - name + - table + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + numCores: + description: |- + numCores is the number of cores per CPU socket in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + minimum: 1 + type: integer + numSockets: + description: |- + numSockets is the number of CPU sockets in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + minimum: 1 + type: integer + pool: + description: pool Add the new VM to the specified pool. + type: string + providerID: + description: |- + providerID is the virtual machine BIOS UUID formatted as + proxmox://6c3fa683-bef9-4425-b413-eaa45a9d6191 + type: string + snapName: + description: snapName The name of the snapshot. + type: string + sourceNode: + description: |- + sourceNode is the initially selected proxmox node. 
+ This node will be used to locate the template VM, which will + be used for cloning operations. + + Cloning will be performed according to the configuration. + Setting the `Target` field will tell Proxmox to clone the + VM on that target node. + + When Target is not set and the ProxmoxCluster contains + a set of `AllowedNodes`, the algorithm will instead evenly + distribute the VMs across the nodes from that list. + + If neither a `Target` nor `AllowedNodes` was set, the VM + will be cloned onto the same node as SourceNode. + minLength: 1 + type: string + storage: + description: storage for full clone. + type: string + tags: + description: tags is a list of tags to be applied to the virtual machine. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + target: + description: target node. Only allowed if the original VM is on shared + storage. + type: string + templateID: + description: templateID the vm_template vmid used for cloning a new + VM. + format: int32 + type: integer + templateSelector: + description: templateSelector defines MatchTags for looking up VM + templates. + properties: + matchTags: + description: |- + matchTags specifies all tags to look for when looking up the VM template. + Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + required: + - matchTags + type: object + virtualMachineID: + description: virtualMachineID is the Proxmox identifier for the ProxmoxMachine + VM. + format: int64 + type: integer + vmIDRange: + description: vmIDRange is the range of VMIDs to use for VMs. + properties: + end: + description: |- + end is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. 
+ format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: start is the start of the VMID range to use for VMs. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to start + rule: self.end >= self.start + type: object + x-kubernetes-validations: + - message: must define either a SourceNode with a TemplateID or a TemplateSelector + rule: '[has(self.sourceNode), has(self.templateSelector)].exists_one(c, + c)' + - message: must define either a SourceNode with a TemplateID or a TemplateSelector + rule: '[has(self.templateID), has(self.templateSelector)].exists_one(c, + c)' + - message: Must set full=true when specifying format + rule: self.full || !has(self.format) + - message: Must set full=true when specifying storage + rule: self.full || !has(self.storage) + status: + description: status is the status of the Proxmox machine. + properties: + addresses: + description: addresses contains the Proxmox VM instance associated + addresses. + items: + description: MachineAddress contains information for the node's + address. + properties: + address: + description: address is the machine address. + maxLength: 256 + minLength: 1 + type: string + type: + description: type is the machine address type, one of Hostname, + ExternalIP, InternalIP, ExternalDNS or InternalDNS. + enum: + - Hostname + - ExternalIP + - InternalIP + - ExternalDNS + - InternalDNS + type: string + required: + - address + - type + type: object + type: array + x-kubernetes-list-type: atomic + bootstrapDataProvided: + description: bootstrapDataProvided whether the virtual machine has + an injected bootstrap data. + type: boolean + conditions: + description: conditions defines current service state of the ProxmoxMachine. + items: + description: Condition defines an observation of a Cluster API resource + operational state. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failureMessage: + description: |- + failureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. 
+ + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of ProxmoxMachines + can be added as events to the ProxmoxMachine object and/or logged in the + controller's output. + type: string + failureReason: + description: |- + failureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + Any transient errors that occur during the reconciliation of ProxmoxMachines + can be added as events to the ProxmoxMachine object and/or logged in the + controller's output. + type: string + ipAddresses: + additionalProperties: + description: IPAddresses stores the IP addresses of a network interface. + Used for status. + properties: + ipv4: + description: ipv4 is the IPv4 address. + items: + type: string + type: array + x-kubernetes-list-type: set + ipv6: + description: ipv6 is the IPv6 address. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: ipAddresses are the IP addresses used to access the virtual + machine. + type: object + network: + description: |- + network returns the network status for each of the machine's configured. + network interfaces. + items: + description: NetworkStatus provides information about one of a VM's + networks. + properties: + connected: + description: |- + connected is a flag that indicates whether this network is currently + connected to the VM. + type: boolean + ipAddrs: + description: ipAddrs is one or more IP addresses reported by + vm-tools. + items: + type: string + type: array + x-kubernetes-list-type: set + macAddr: + description: macAddr is the MAC address of the network device. + maxLength: 17 + minLength: 17 + pattern: ^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})$ + type: string + networkName: + description: networkName is the name of the network. + pattern: ^net[0-9]+$ + type: string + required: + - connected + - macAddr + type: object + type: array + x-kubernetes-list-type: atomic + proxmoxNode: + description: |- + proxmoxNode is the name of the proxmox node, which was chosen for this + machine to be deployed on. + type: string + ready: + description: ready indicates the Docker infrastructure has been provisioned + and is ready. + type: boolean + retryAfter: + description: retryAfter tracks the time we can retry queueing a task. + format: date-time + type: string + taskRef: + description: |- + taskRef is a managed object reference to a Task related to the ProxmoxMachine. + This value is set automatically at runtime and should not be set or + modified by users. + type: string + vmStatus: + description: vmStatus is used to identify the virtual machine status. 
+ type: string + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 12e1503e..cbe24ce7 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -703,4 +703,565 @@ spec: type: object type: object served: true + storage: false + - name: v1alpha2 + schema: + openAPIV3Schema: + description: ProxmoxMachineTemplate is the Schema for the proxmoxmachinetemplates + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the machine template spec. + properties: + template: + description: template is the Proxmox machine template resource. + properties: + metadata: + description: |- + metadata is the standard object metadata. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + labels is a map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: spec is the Proxmox machine spec. + properties: + allowedNodes: + description: |- + allowedNodes specifies all Proxmox nodes which will be considered + for operations. This implies that VMs can be cloned on different nodes from + the node which holds the VM template. + + This field is optional and should only be set if you want to restrict + the nodes where the VM can be cloned. + If not set, the ProxmoxCluster will be used to determine the nodes. + items: + type: string + type: array + x-kubernetes-list-type: set + checks: + description: checks defines possibles checks to skip. + properties: + skipCloudInitStatus: + description: skipCloudInitStatus skip checking CloudInit + status which can be useful with specific Operating Systems + like TalOS + type: boolean + skipQemuGuestAgent: + description: skipQemuGuestAgent skips checking QEMU Agent + readiness which can be useful with specific Operating + Systems like TalOS + type: boolean + type: object + description: + description: description for the new VM. 
+ type: string + disks: + description: |- + disks contains a set of disk configuration options, + which will be applied before the first startup. + properties: + bootVolume: + description: |- + bootVolume defines the storage size for the boot volume. + This field is optional, and should only be set if you want + to change the size of the boot volume. + properties: + disk: + description: |- + disk is the name of the disk device that should be resized. + Example values are: ide[0-3], scsi[0-30], sata[0-5]. + minLength: 1 + type: string + sizeGb: + description: |- + sizeGb defines the size in gigabytes. + + As Proxmox does not support shrinking, the size + must be bigger than the already configured size in the + template. + format: int32 + minimum: 5 + type: integer + required: + - disk + - sizeGb + type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + type: object + format: + description: format for file storage. Only valid for full + clone. + enum: + - raw + - qcow2 + - vmdk + type: string + full: + default: true + description: |- + full Create a full copy of all disks. + This is always done when you clone a normal VM. + Create a Full clone by default. + type: boolean + memoryMiB: + description: |- + memoryMiB is the size of a virtual machine's memory, in MiB. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + multipleOf: 8 + type: integer + metadataSettings: + description: metadataSettings defines the metadata settings + for this machine's VM. + properties: + providerIDInjection: + description: |- + providerIDInjection enables the injection of the `providerID` into the cloudinit metadata. + this will basically set the `provider-id` field in the metadata to `proxmox://`. + type: boolean + required: + - providerIDInjection + type: object + network: + description: network is the network configuration for this + machine's VM. 
+ properties: + networkDevices: + description: |- + networkDevices lists network devices. + net0 is always the default device. + items: + description: NetworkDevice defines the required details + of a virtual machine network device. + properties: + bridge: + description: bridge is the network bridge to attach + to the machine. + minLength: 1 + type: string + dnsServers: + description: |- + dnsServers contains information about nameservers to be used for this interface. + If this field is not set, it will use the default dns servers from the ProxmoxCluster. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + ipPoolRef: + description: |- + ipPoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses. + The network device will use an available IP address from the referenced pool. + This can be combined with `IPv6PoolRef` in order to enable dual stack. + items: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipPoolRef allows only IPAM apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipPoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + type: array + x-kubernetes-list-type: atomic + linkMtu: + description: linkMtu is the network device Maximum + Transmission Unit. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + model: + default: virtio + description: model is the network device model. + enum: + - e1000 + - virtio + - rtl8139 + - vmxnet3 + type: string + mtu: + description: |- + mtu is the network device Maximum Transmission Unit. + When set to 1, virtio devices inherit the MTU value from the underlying bridge. + format: int32 + type: integer + x-kubernetes-validations: + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + - message: invalid MTU value + rule: self == 1 || ( self >= 576 && self <= 65520) + name: + default: net0 + description: name is the network device name. + pattern: ^net[0-9]+$ + type: string + routes: + description: routes are the routes associated with + this interface. + items: + description: RouteSpec describes an IPv4/IPv6 + Route. + properties: + metric: + description: metric is the priority of the + route in the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing table used + for this route. 
+ format: int32 + type: integer + to: + description: to is the subnet to be routed. + type: string + via: + description: via is the gateway to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific + policy inserted into FIB (forwarding information + base). + items: + description: RoutingPolicySpec is a Linux FIB + rule. + properties: + from: + description: from is the subnet of the source. + type: string + priority: + description: priority is the position in the + ip rule FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert FIB + rule matching kernel rules + rule: (self > 0 && self < 32765) || (self + > 32766) + table: + description: table is the routing table ID. + format: int32 + type: integer + to: + description: to is the subnet of the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + vlan: + description: vlan is the network L2 VLAN. + format: int32 + maximum: 4094 + minimum: 1 + type: integer + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vrfs: + description: vrfs defines VRF Devices. + items: + description: VRFDevice defines Virtual Routing Flow + devices. + properties: + interfaces: + description: interfaces is the list of proxmox network + devices managed by this virtual device. + items: + description: NetName is a formally verified Proxmox + network name string. + pattern: ^net[0-9]+$ + type: string + type: array + x-kubernetes-list-type: set + name: + description: |- + name is the virtual network device name. + Must be unique within the virtual machine. + minLength: 3 + type: string + routes: + description: routes are the routes associated with + this interface. + items: + description: RouteSpec describes an IPv4/IPv6 + Route. 
+ properties: + metric: + description: metric is the priority of the + route in the routing table. + format: int32 + minimum: 0 + type: integer + table: + description: table is the routing table used + for this route. + format: int32 + type: integer + to: + description: to is the subnet to be routed. + type: string + via: + description: via is the gateway to the subnet. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + routingPolicy: + description: routingPolicy is an interface-specific + policy inserted into FIB (forwarding information + base). + items: + description: RoutingPolicySpec is a Linux FIB + rule. + properties: + from: + description: from is the subnet of the source. + type: string + priority: + description: priority is the position in the + ip rule FIB table. + format: int64 + maximum: 4294967295 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert FIB + rule matching kernel rules + rule: (self > 0 && self < 32765) || (self + > 32766) + table: + description: table is the routing table ID. + format: int32 + type: integer + to: + description: to is the subnet of the target. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + table: + description: table is the ID of the routing table + used for the l3mdev vrf device. + format: int32 + maximum: 4294967295 + minimum: 1 + type: integer + x-kubernetes-validations: + - message: Cowardly refusing to insert l3mdev rules + into kernel tables + rule: (self > 0 && self < 254) || (self > 255) + required: + - name + - table + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + numCores: + description: |- + numCores is the number of cores per CPU socket in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. 
+ format: int32 + minimum: 1 + type: integer + numSockets: + description: |- + numSockets is the number of CPU sockets in a virtual machine. + Defaults to the property value in the template from which the virtual machine is cloned. + format: int32 + minimum: 1 + type: integer + pool: + description: pool Add the new VM to the specified pool. + type: string + providerID: + description: |- + providerID is the virtual machine BIOS UUID formatted as + proxmox://6c3fa683-bef9-4425-b413-eaa45a9d6191 + type: string + snapName: + description: snapName The name of the snapshot. + type: string + sourceNode: + description: |- + sourceNode is the initially selected proxmox node. + This node will be used to locate the template VM, which will + be used for cloning operations. + + Cloning will be performed according to the configuration. + Setting the `Target` field will tell Proxmox to clone the + VM on that target node. + + When Target is not set and the ProxmoxCluster contains + a set of `AllowedNodes`, the algorithm will instead evenly + distribute the VMs across the nodes from that list. + + If neither a `Target` nor `AllowedNodes` was set, the VM + will be cloned onto the same node as SourceNode. + minLength: 1 + type: string + storage: + description: storage for full clone. + type: string + tags: + description: tags is a list of tags to be applied to the virtual + machine. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + target: + description: target node. Only allowed if the original VM + is on shared storage. + type: string + templateID: + description: templateID the vm_template vmid used for cloning + a new VM. + format: int32 + type: integer + templateSelector: + description: templateSelector defines MatchTags for looking + up VM templates. + properties: + matchTags: + description: |- + matchTags specifies all tags to look for when looking up the VM template. 
+ Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + pattern: ^(?i)[a-z0-9_][a-z0-9_\-\+\.]*$ + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + required: + - matchTags + type: object + virtualMachineID: + description: virtualMachineID is the Proxmox identifier for + the ProxmoxMachine VM. + format: int64 + type: integer + vmIDRange: + description: vmIDRange is the range of VMIDs to use for VMs. + properties: + end: + description: |- + end is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: start is the start of the VMID range to use + for VMs. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to start + rule: self.end >= self.start + type: object + x-kubernetes-validations: + - message: Must set full=true when specifying format + rule: self.full || !has(self.format) + - message: Must set full=true when specifying storage + rule: self.full || !has(self.storage) + required: + - spec + type: object + required: + - template + type: object + required: + - spec + type: object + served: true storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 785e9a5f..0b55aa70 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -9,7 +9,7 @@ resources: #+kubebuilder:scaffold:crdkustomizeresource commonLabels: - cluster.x-k8s.io/v1beta1: v1alpha1 + cluster.x-k8s.io/v1beta1: v1alpha2 #patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
@@ -25,6 +25,7 @@ commonLabels: #- patches/cainjection_in_proxmoxclustertemplates.yaml #- patches/cainjection_in_proxmoxmachines.yaml #- patches/cainjection_in_proxmoxmachinetemplates.yaml +#- path: patches/cainjection_in_proxmoxmachines.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index ec5c150a..e33ea7ce 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -4,13 +4,13 @@ nameReference: version: v1 fieldSpecs: - kind: CustomResourceDefinition - version: v1 + version: v2 group: apiextensions.k8s.io path: spec/conversion/webhook/clientConfig/service/name namespace: - kind: CustomResourceDefinition - version: v1 + version: v2 group: apiextensions.k8s.io path: spec/conversion/webhook/clientConfig/service/namespace create: false diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index f61aad8e..ad13e96b 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,3 +1,8 @@ ---- resources: - manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: controller + newTag: latest diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 731832a6..26145e88 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -16,3 +16,10 @@ resources: - auth_proxy_role.yaml - auth_proxy_role_binding.yaml - auth_proxy_client_clusterrole.yaml +# For each CRD, "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the Project itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. 
+- proxmoxmachine_editor_role.yaml +- proxmoxmachine_viewer_role.yaml + diff --git a/config/samples/infrastructure_v1alpha2_proxmoxmachine.yaml b/config/samples/infrastructure_v1alpha2_proxmoxmachine.yaml new file mode 100644 index 00000000..bb360f14 --- /dev/null +++ b/config/samples/infrastructure_v1alpha2_proxmoxmachine.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: ProxmoxMachine +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-proxmox + app.kubernetes.io/managed-by: kustomize + name: proxmoxmachine-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index f09042b5..b3cd7786 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -5,4 +5,5 @@ resources: - infrastructure_v1alpha1_proxmoxclustertemplate.yaml - infrastructure_v1alpha1_proxmoxmachine.yaml - infrastructure_v1alpha1_proxmoxmachinetemplate.yaml +- infrastructure_v1alpha2_proxmoxmachine.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index c04d55b6..4232d217 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -10,7 +10,7 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-proxmoxcluster + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha2-proxmoxcluster failurePolicy: Fail matchPolicy: Equivalent name: validation.proxmoxcluster.infrastructure.cluster.x-k8s.io @@ -18,7 +18,7 @@ webhooks: - apiGroups: - infrastructure.cluster.x-k8s.io apiVersions: - - v1alpha1 + - v1alpha2 operations: - CREATE - UPDATE @@ -31,7 +31,7 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-proxmoxmachine + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha2-proxmoxmachine failurePolicy: Fail matchPolicy: 
Equivalent name: validation.proxmoxmachine.infrastructure.cluster.x-k8s.io @@ -39,7 +39,7 @@ webhooks: - apiGroups: - infrastructure.cluster.x-k8s.io apiVersions: - - v1alpha1 + - v1alpha2 operations: - CREATE - UPDATE diff --git a/go.mod b/go.mod index 388f8897..9f403410 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/ionos-cloud/cluster-api-provider-proxmox -go 1.24.6 +go 1.24.6 // keep in sync with hack/tools/go.mod replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.4 @@ -18,6 +18,7 @@ require ( github.com/stretchr/testify v1.10.0 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/tools v0.34.0 + inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 @@ -139,6 +140,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org v0.0.0-20201209231011-d4a079459e60 // indirect + go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/crypto v0.40.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.25.0 // indirect diff --git a/go.sum b/go.sum index 4640fa98..06f866d1 100644 --- a/go.sum +++ b/go.sum @@ -105,6 +105,7 @@ github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5 github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY= github.com/elliotwutingfeng/asciiset 
v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= @@ -442,8 +443,13 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20160314031811-03efcb870d84/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20201209231011-d4a079459e60 h1:iqAGo78tVOJXELHQFRjR6TMwItrvXH4hrGJ32I/NFF8= go4.org v0.0.0-20201209231011-d4a079459e60/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -534,6 +540,7 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -585,6 +592,7 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -659,6 +667,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a h1:1XCVEdxrvL6c0TGOhecLuB7U9zYNdxZEjvOqJreKZiM= +inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a/go.mod h1:e83i32mAQOW1LAqEIweALsuK2Uw4mhQadA5r7b0Wobo= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= 
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= diff --git a/hack/tools/go.mod b/hack/tools/go.mod index b6f36a53..f3b0e936 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -1,13 +1,14 @@ module github.com/ionos-cloud/cluster-api-provider-proxmox -go 1.24.5 +go 1.24.6 // keep in sync with root go.mod require ( github.com/braydonk/yaml v0.9.0 - github.com/golangci/golangci-lint/v2 v2.6.0 + github.com/golangci/golangci-lint/v2 v2.6.0 // remember to update version in lint action github.com/google/yamlfmt v0.20.0 github.com/vektra/mockery/v2 v2.53.4 golang.org/x/tools v0.38.0 + sigs.k8s.io/kube-api-linter v0.0.0-20250819172928-066025356482 ) require ( @@ -85,7 +86,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/godoc-lint/godoc-lint v0.10.1 // indirect github.com/gofrs/flock v0.13.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/asciicheck v0.5.0 // indirect github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect github.com/golangci/go-printf-func-name v0.1.1 // indirect @@ -219,6 +220,8 @@ require ( google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.6.1 // indirect + k8s.io/apimachinery v0.32.3 // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect mvdan.cc/gofumpt v0.9.2 // indirect mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 // indirect ) diff --git a/hack/tools/go.sum b/hack/tools/go.sum index 2fb81964..2f9f025f 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -264,8 +264,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golangci/asciicheck v0.5.0 h1:jczN/BorERZwK8oiFBOGvlGPknhvq0bjnysTj4nUfo0= github.com/golangci/asciicheck v0.5.0/go.mod h1:5RMNAInbNFw2krqN6ibBxN/zfRFa9S6tA1nPdM0l8qQ= github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= @@ -1016,6 +1016,10 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.9.2 h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4= mvdan.cc/gofumpt v0.9.2/go.mod h1:iB7Hn+ai8lPvofHd9ZFGVg2GOr8sBUw1QUWjNbmIL/s= mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 h1:ssMzja7PDPJV8FStj7hq9IKiuiKhgz9ErWw+m68e7DI= @@ -1023,3 +1027,5 @@ mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15/go.mod h1:4M5MMXl2kW6fivUT6y rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/kube-api-linter v0.0.0-20250819172928-066025356482 h1:Ix3F/nLG+fxAsJpazzbGwf+snKo+mTRou+HTGD8h3F4= +sigs.k8s.io/kube-api-linter v0.0.0-20250819172928-066025356482/go.mod h1:Jxl3NU9lRf9WJ8dgwgF4U6tLF229jR/KEvtxSwRAKnE= diff --git a/hack/tools/tools.go b/hack/tools/tools.go index 181ee188..5bae5d6f 100644 --- a/hack/tools/tools.go +++ b/hack/tools/tools.go @@ -25,4 +25,5 @@ import ( _ "github.com/google/yamlfmt" _ "github.com/vektra/mockery/v2" _ "golang.org/x/tools/cmd/goimports" + _ "sigs.k8s.io/kube-api-linter/pkg/plugin" ) diff --git a/internal/controller/proxmoxcluster_controller.go b/internal/controller/proxmoxcluster_controller.go index 25959428..440d8b8e 100644 --- a/internal/controller/proxmoxcluster_controller.go +++ b/internal/controller/proxmoxcluster_controller.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" @@ -67,10 +67,10 @@ type ProxmoxClusterReconciler struct { // SetupWithManager sets up the controller with the Manager. func (r *ProxmoxClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1alpha1.ProxmoxCluster{}). + For(&infrav1.ProxmoxCluster{}). WithEventFilter(predicates.ResourceNotPaused(r.Scheme, ctrl.LoggerFrom(ctx))). 
Watches(&clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterutil.ClusterToInfrastructureMapFunc(ctx, infrav1alpha1.GroupVersion.WithKind(infrav1alpha1.ProxmoxClusterKind), mgr.GetClient(), &infrav1alpha1.ProxmoxCluster{})), + handler.EnqueueRequestsFromMapFunc(clusterutil.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind(infrav1.ProxmoxClusterKind), mgr.GetClient(), &infrav1.ProxmoxCluster{})), builder.WithPredicates(predicates.ClusterUnpaused(r.Scheme, ctrl.LoggerFrom(ctx)))). WithEventFilter(predicates.ResourceIsNotExternallyManaged(r.Scheme, ctrl.LoggerFrom(ctx))). Complete(r) @@ -96,7 +96,7 @@ func (r *ProxmoxClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr func (r *ProxmoxClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { logger := log.FromContext(ctx) - proxmoxCluster := &infrav1alpha1.ProxmoxCluster{} + proxmoxCluster := &infrav1.ProxmoxCluster{} if err := r.Client.Get(ctx, req.NamespacedName, proxmoxCluster); err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil @@ -173,7 +173,7 @@ func (r *ProxmoxClusterReconciler) reconcileDelete(ctx context.Context, clusterS // Requeue if there are one or more machines left. 
if len(machines) > 0 { clusterScope.Info("waiting for machines to be deleted", "remaining", len(machines)) - return ctrl.Result{RequeueAfter: infrav1alpha1.DefaultReconcilerRequeue}, nil + return ctrl.Result{RequeueAfter: infrav1.DefaultReconcilerRequeue}, nil } if err := r.reconcileDeleteCredentialsSecret(ctx, clusterScope); err != nil { @@ -181,7 +181,7 @@ func (r *ProxmoxClusterReconciler) reconcileDelete(ctx context.Context, clusterS } clusterScope.Info("cluster deleted successfully") - ctrlutil.RemoveFinalizer(clusterScope.ProxmoxCluster, infrav1alpha1.ClusterFinalizer) + ctrlutil.RemoveFinalizer(clusterScope.ProxmoxCluster, infrav1.ClusterFinalizer) return ctrl.Result{}, nil } @@ -189,27 +189,27 @@ func (r *ProxmoxClusterReconciler) reconcileNormal(ctx context.Context, clusterS clusterScope.Logger.Info("Reconciling ProxmoxCluster") // If the ProxmoxCluster doesn't have our finalizer, add it. - ctrlutil.AddFinalizer(clusterScope.ProxmoxCluster, infrav1alpha1.ClusterFinalizer) + ctrlutil.AddFinalizer(clusterScope.ProxmoxCluster, infrav1.ClusterFinalizer) - if clusterScope.ProxmoxCluster.Spec.ExternalManagedControlPlane { + if *clusterScope.ProxmoxCluster.Spec.ExternalManagedControlPlane { if clusterScope.ProxmoxCluster.Spec.ControlPlaneEndpoint == nil { clusterScope.Logger.Info("ProxmoxCluster is not ready, missing or waiting for a ControlPlaneEndpoint") - conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady, infrav1alpha1.MissingControlPlaneEndpointReason, clusterv1.ConditionSeverityWarning, "The ProxmoxCluster is missing or waiting for a ControlPlaneEndpoint") + conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1.ProxmoxClusterReady, infrav1.MissingControlPlaneEndpointReason, clusterv1.ConditionSeverityWarning, "The ProxmoxCluster is missing or waiting for a ControlPlaneEndpoint") return ctrl.Result{Requeue: true}, nil } if clusterScope.ProxmoxCluster.Spec.ControlPlaneEndpoint.Host == "" { 
clusterScope.Logger.Info("ProxmoxCluster is not ready, missing or waiting for a ControlPlaneEndpoint host") - conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady, infrav1alpha1.MissingControlPlaneEndpointReason, clusterv1.ConditionSeverityWarning, "The ProxmoxCluster is missing or waiting for a ControlPlaneEndpoint host") + conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1.ProxmoxClusterReady, infrav1.MissingControlPlaneEndpointReason, clusterv1.ConditionSeverityWarning, "The ProxmoxCluster is missing or waiting for a ControlPlaneEndpoint host") return ctrl.Result{Requeue: true}, nil } if clusterScope.ProxmoxCluster.Spec.ControlPlaneEndpoint.Port == 0 { clusterScope.Logger.Info("ProxmoxCluster is not ready, missing or waiting for a ControlPlaneEndpoint port") - conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady, infrav1alpha1.MissingControlPlaneEndpointReason, clusterv1.ConditionSeverityWarning, "The ProxmoxCluster is missing or waiting for a ControlPlaneEndpoint port") + conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1.ProxmoxClusterReady, infrav1.MissingControlPlaneEndpointReason, clusterv1.ConditionSeverityWarning, "The ProxmoxCluster is missing or waiting for a ControlPlaneEndpoint port") return ctrl.Result{Requeue: true}, nil } @@ -233,7 +233,7 @@ func (r *ProxmoxClusterReconciler) reconcileNormal(ctx context.Context, clusterS } if err := r.reconcileNormalCredentialsSecret(ctx, clusterScope); err != nil { - conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady, infrav1alpha1.ProxmoxUnreachableReason, clusterv1.ConditionSeverityError, "%s", err) + conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1.ProxmoxClusterReady, infrav1.ProxmoxUnreachableReason, clusterv1.ConditionSeverityError, "%s", err) if apierrors.IsNotFound(err) { clusterScope.ProxmoxCluster.Status.FailureMessage = ptr.To("credentials secret not found") 
clusterScope.ProxmoxCluster.Status.FailureReason = ptr.To(clustererrors.InvalidConfigurationClusterError) @@ -241,9 +241,9 @@ func (r *ProxmoxClusterReconciler) reconcileNormal(ctx context.Context, clusterS return reconcile.Result{}, err } - conditions.MarkTrue(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady) + conditions.MarkTrue(clusterScope.ProxmoxCluster, infrav1.ProxmoxClusterReady) - clusterScope.ProxmoxCluster.Status.Ready = true + clusterScope.ProxmoxCluster.Status.Ready = ptr.To(true) return ctrl.Result{}, nil } @@ -285,26 +285,26 @@ func (r *ProxmoxClusterReconciler) reconcileIPAM(ctx context.Context, clusterSco } if clusterScope.ProxmoxCluster.Spec.IPv4Config != nil { - poolV4, err := clusterScope.IPAMHelper.GetDefaultInClusterIPPool(ctx, infrav1alpha1.IPV4Format) + poolv4, err := clusterScope.IPAMHelper.GetDefaultInClusterIPPool(ctx, infrav1.IPv4Format) if err != nil { if apierrors.IsNotFound(err) { - return ctrl.Result{RequeueAfter: infrav1alpha1.DefaultReconcilerRequeue}, nil + return ctrl.Result{RequeueAfter: infrav1.DefaultReconcilerRequeue}, nil } return ctrl.Result{}, err } - clusterScope.ProxmoxCluster.SetInClusterIPPoolRef(poolV4) + clusterScope.ProxmoxCluster.SetInClusterIPPoolRef(poolv4) } if clusterScope.ProxmoxCluster.Spec.IPv6Config != nil { - poolV6, err := clusterScope.IPAMHelper.GetDefaultInClusterIPPool(ctx, infrav1alpha1.IPV6Format) + poolv6, err := clusterScope.IPAMHelper.GetDefaultInClusterIPPool(ctx, infrav1.IPv6Format) if err != nil { if apierrors.IsNotFound(err) { - return ctrl.Result{RequeueAfter: infrav1alpha1.DefaultReconcilerRequeue}, nil + return ctrl.Result{RequeueAfter: infrav1.DefaultReconcilerRequeue}, nil } return ctrl.Result{}, err } - clusterScope.ProxmoxCluster.SetInClusterIPPoolRef(poolV6) + clusterScope.ProxmoxCluster.SetInClusterIPPoolRef(poolv6) } return reconcile.Result{}, nil @@ -334,7 +334,7 @@ func (r *ProxmoxClusterReconciler) reconcileNormalCredentialsSecret(ctx context. 
// Ensure the ProxmoxCluster is an owner and that the APIVersion is up-to-date. secret.SetOwnerReferences(clusterutil.EnsureOwnerRef(secret.GetOwnerReferences(), metav1.OwnerReference{ - APIVersion: infrav1alpha1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "ProxmoxCluster", Name: proxmoxCluster.Name, UID: proxmoxCluster.UID, @@ -342,8 +342,8 @@ func (r *ProxmoxClusterReconciler) reconcileNormalCredentialsSecret(ctx context. )) // Ensure the finalizer is added. - if !ctrlutil.ContainsFinalizer(secret, infrav1alpha1.SecretFinalizer) { - ctrlutil.AddFinalizer(secret, infrav1alpha1.SecretFinalizer) + if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretFinalizer) { + ctrlutil.AddFinalizer(secret, infrav1.SecretFinalizer) } return helper.Patch(ctx, secret) @@ -376,7 +376,7 @@ func (r *ProxmoxClusterReconciler) reconcileDeleteCredentialsSecret(ctx context. } ownerRef := metav1.OwnerReference{ - APIVersion: infrav1alpha1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "ProxmoxCluster", Name: proxmoxCluster.Name, UID: proxmoxCluster.UID, @@ -385,20 +385,20 @@ func (r *ProxmoxClusterReconciler) reconcileDeleteCredentialsSecret(ctx context. if len(secret.GetOwnerReferences()) > 1 { // Remove the ProxmoxCluster from the OwnerRef. secret.SetOwnerReferences(clusterutil.RemoveOwnerRef(secret.GetOwnerReferences(), ownerRef)) - } else if clusterutil.HasOwnerRef(secret.GetOwnerReferences(), ownerRef) && ctrlutil.ContainsFinalizer(secret, infrav1alpha1.SecretFinalizer) { + } else if clusterutil.HasOwnerRef(secret.GetOwnerReferences(), ownerRef) && ctrlutil.ContainsFinalizer(secret, infrav1.SecretFinalizer) { // There is only one OwnerRef, the current ProxmoxCluster. Remove the Finalizer (if present). 
- logger.Info(fmt.Sprintf("Removing finalizer %s", infrav1alpha1.SecretFinalizer), "Secret", klog.KObj(secret)) - ctrlutil.RemoveFinalizer(secret, infrav1alpha1.SecretFinalizer) + logger.Info(fmt.Sprintf("Removing finalizer %s", infrav1.SecretFinalizer), "Secret", klog.KObj(secret)) + ctrlutil.RemoveFinalizer(secret, infrav1.SecretFinalizer) } return helper.Patch(ctx, secret) } -func hasCredentialsRef(proxmoxCluster *infrav1alpha1.ProxmoxCluster) bool { +func hasCredentialsRef(proxmoxCluster *infrav1.ProxmoxCluster) bool { return proxmoxCluster != nil && proxmoxCluster.Spec.CredentialsRef != nil } -func getNamespaceFromProxmoxCluster(proxmoxCluster *infrav1alpha1.ProxmoxCluster) string { +func getNamespaceFromProxmoxCluster(proxmoxCluster *infrav1.ProxmoxCluster) string { namespace := proxmoxCluster.Spec.CredentialsRef.Namespace if len(namespace) == 0 { namespace = proxmoxCluster.GetNamespace() diff --git a/internal/controller/proxmoxcluster_controller_test.go b/internal/controller/proxmoxcluster_controller_test.go index 273afa86..640f9ac0 100644 --- a/internal/controller/proxmoxcluster_controller_test.go +++ b/internal/controller/proxmoxcluster_controller_test.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" ) @@ -102,7 +102,7 @@ var _ = Describe("Controller Test", func() { assertClusterIsReady(testEnv.GetContext(), g, clusterName) g.Eventually(func(g Gomega) { - pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPV4Format) + pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPv4Format) g.Expect(err).ToNot(HaveOccurred()) config := cl.Spec.IPv4Config @@ -118,7 +118,7 @@ var _ = 
Describe("Controller Test", func() { WithPolling(time.Second). Should(Succeed()) }) - It("Should successfully create IPAM IPV6 related resources", func() { + It("Should successfully create IPAM IPv6 related resources", func() { cl := buildProxmoxCluster(clusterName) cl.Spec.IPv6Config = &infrav1.IPConfigSpec{ Addresses: []string{"2001:db8::/64"}, @@ -134,7 +134,7 @@ var _ = Describe("Controller Test", func() { assertClusterIsReady(testEnv.GetContext(), g, clusterName) g.Eventually(func(g Gomega) { - pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPV6Format) + pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPv6Format) g.Expect(err).ToNot(HaveOccurred()) config := cl.Spec.IPv6Config @@ -161,7 +161,7 @@ var _ = Describe("Controller Test", func() { assertClusterIsReady(testEnv.GetContext(), g, clusterName) g.Eventually(func(g Gomega) { - pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPV4Format) + pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPv4Format) g.Expect(err).ToNot(HaveOccurred()) config := cl.Spec.IPv4Config @@ -176,13 +176,13 @@ var _ = Describe("Controller Test", func() { WithPolling(time.Second). Should(Succeed()) - pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPV4Format) + pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPv4Format) g.Expect(err).ToNot(HaveOccurred()) // create an IPAddress. 
g.Expect(k8sClient.Create(testEnv.GetContext(), dummyIPAddress(k8sClient, &cl, pool.GetName()))).To(Succeed()) g.Eventually(func(g Gomega) { - pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPV4Format) + pool, err := helper.GetDefaultInClusterIPPool(testEnv.GetContext(), infrav1.IPv4Format) g.Expect(err).ToNot(HaveOccurred()) ipAddr, err := helper.GetIPAddress(testEnv.GetContext(), client.ObjectKeyFromObject(&cl)) @@ -378,7 +378,7 @@ func assertClusterIsReady(ctx context.Context, g Gomega, clusterName string) { Name: clusterName, }, &res)).To(Succeed()) - g.Expect(res.Status.Ready).To(BeTrue()) + g.Expect(ptr.Deref(res.Status.Ready, false)).To(BeTrue()) }).WithTimeout(time.Second * 20). WithPolling(time.Second). Should(Succeed()) diff --git a/internal/controller/proxmoxmachine_controller.go b/internal/controller/proxmoxmachine_controller.go index f9d8a629..23289ef8 100644 --- a/internal/controller/proxmoxmachine_controller.go +++ b/internal/controller/proxmoxmachine_controller.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/taskservice" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/vmservice" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" @@ -55,10 +55,10 @@ type ProxmoxMachineReconciler struct { // SetupWithManager sets up the controller with the Manager. func (r *ProxmoxMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1alpha1.ProxmoxMachine{}). + For(&infrav1.ProxmoxMachine{}). 
Watches( &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1alpha1.GroupVersion.WithKind(infrav1alpha1.ProxmoxMachineKind))), + handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind(infrav1.ProxmoxMachineKind))), ). Complete(r) } @@ -82,7 +82,7 @@ func (r *ProxmoxMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque logger := log.FromContext(ctx) // Fetch the ProxmoxMachine instance. - proxmoxMachine := &infrav1alpha1.ProxmoxMachine{} + proxmoxMachine := &infrav1.ProxmoxMachine{} err := r.Get(ctx, req.NamespacedName, proxmoxMachine) if err != nil { if apierrors.IsNotFound(err) { @@ -157,14 +157,14 @@ func (r *ProxmoxMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque func (r *ProxmoxMachineReconciler) reconcileDelete(ctx context.Context, machineScope *scope.MachineScope) (ctrl.Result, error) { machineScope.Logger.Info("Handling deleted ProxmoxMachine") - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") err := vmservice.DeleteVM(ctx, machineScope) if err != nil { return reconcile.Result{}, err } // VM is being deleted - return reconcile.Result{RequeueAfter: infrav1alpha1.DefaultReconcilerRequeue}, nil + return reconcile.Result{RequeueAfter: infrav1.DefaultReconcilerRequeue}, nil } func (r *ProxmoxMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) { @@ -178,19 +178,19 @@ func (r *ProxmoxMachineReconciler) reconcileNormal(ctx context.Context, machineS if !machineScope.Cluster.Status.InfrastructureReady { machineScope.Info("Cluster infrastructure is not ready yet") - 
conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated. if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { machineScope.Info("Bootstrap data secret reference is not yet available") - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // If the ProxmoxMachine doesn't have our finalizer, add it. - if ctrlutil.AddFinalizer(machineScope.ProxmoxMachine, infrav1alpha1.MachineFinalizer) { + if ctrlutil.AddFinalizer(machineScope.ProxmoxMachine, infrav1.MachineFinalizer) { // Register the finalizer after first read operation from Proxmox to avoid orphaning Proxmox resources on delete if err := machineScope.PatchObject(); err != nil { machineScope.Error(err, "unable to patch object") @@ -209,31 +209,31 @@ func (r *ProxmoxMachineReconciler) reconcileNormal(ctx context.Context, machineS machineScope.Logger.Error(err, "error reconciling VM") return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile VM") } - machineScope.ProxmoxMachine.Status.VMStatus = vm.State + machineScope.ProxmoxMachine.Status.VMStatus = &vm.State // Do not proceed until the backend VM is marked ready. 
- if vm.State != infrav1alpha1.VirtualMachineStateReady { + if vm.State != infrav1.VirtualMachineStateReady { machineScope.Logger.Info( "VM state is not reconciled", - "expectedVMState", infrav1alpha1.VirtualMachineStateReady, + "expectedVMState", infrav1.VirtualMachineStateReady, "actualVMState", vm.State) - return reconcile.Result{RequeueAfter: infrav1alpha1.DefaultReconcilerRequeue}, nil + return reconcile.Result{RequeueAfter: infrav1.DefaultReconcilerRequeue}, nil } // TODO, check if we need to add some labels to the machine. machineScope.SetReady() - conditions.MarkTrue(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + conditions.MarkTrue(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) machineScope.Logger.Info("ProxmoxMachine is ready") return reconcile.Result{}, nil } -func (r *ProxmoxMachineReconciler) getInfraCluster(ctx context.Context, logger *logr.Logger, cluster *clusterv1.Cluster, proxmoxMachine *infrav1alpha1.ProxmoxMachine) (*scope.ClusterScope, error) { +func (r *ProxmoxMachineReconciler) getInfraCluster(ctx context.Context, logger *logr.Logger, cluster *clusterv1.Cluster, proxmoxMachine *infrav1.ProxmoxMachine) (*scope.ClusterScope, error) { var clusterScope *scope.ClusterScope var err error - proxmoxCluster := &infrav1alpha1.ProxmoxCluster{} + proxmoxCluster := &infrav1.ProxmoxCluster{} infraClusterName := client.ObjectKey{ Namespace: proxmoxMachine.Namespace, diff --git a/internal/controller/proxmoxmachine_controller_test.go b/internal/controller/proxmoxmachine_controller_test.go index c807871e..d91a203a 100644 --- a/internal/controller/proxmoxmachine_controller_test.go +++ b/internal/controller/proxmoxmachine_controller_test.go @@ -26,7 +26,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) var _ = 
Describe("ProxmoxMachineReconciler", func() { diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 7f02da0d..b4fac99c 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -30,7 +30,10 @@ import ( "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest" "github.com/ionos-cloud/cluster-api-provider-proxmox/test/helpers" + // +kubebuilder:scaffold:imports + + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -56,6 +59,16 @@ func TestAPIs(t *testing.T) { var _ = BeforeSuite(func() { proxmoxClient = proxmoxtest.NewMockClient(GinkgoT()) testEnv = helpers.NewTestEnvironment(false, proxmoxClient) + // TODO: do I need this? + cache := testEnv.GetCache() + + indexFunc := func(obj client.Object) []string { + return []string{obj.(*ipamv1.IPAddress).Spec.PoolRef.Name} + } + + if err := cache.IndexField(testEnv.GetContext(), &ipamv1.IPAddress{}, "spec.poolRef.name", indexFunc); err != nil { + panic(err) + } logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) diff --git a/internal/inject/inject_test.go b/internal/inject/inject_test.go index 54ca2bde..28089aca 100644 --- a/internal/inject/inject_test.go +++ b/internal/inject/inject_test.go @@ -96,8 +96,7 @@ func TestISOInjectorInjectCloudInit(t *testing.T) { NetworkRenderer: cloudinit.NewNetworkConfig([]types.NetworkConfigData{ { Name: "eth0", - IPAddress: "10.1.1.6/24", - Gateway: "10.1.1.1", + IPConfigs: []types.IPConfig{{IPAddress: "10.1.1.6/24", Gateway: "10.1.1.1"}}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }), @@ -140,8 +139,7 @@ func TestISOInjectorInjectCloudInit_Errors(t *testing.T) { NetworkRenderer: cloudinit.NewNetworkConfig([]types.NetworkConfigData{ { Name: "eth0", - IPAddress: "10.1.1.6/24", - Gateway: "10.1.1.1", + IPConfigs: []types.IPConfig{{IPAddress: "10.1.1.6/24", Gateway: "10.1.1.1"}}, DNSServers: 
[]string{"8.8.8.8", "8.8.4.4"}, }, }), @@ -191,8 +189,7 @@ func TestISOInjectorInjectIgnition(t *testing.T) { Network: []types.NetworkConfigData{ { Name: "eth0", - IPAddress: "10.1.1.6/24", - Gateway: "10.1.1.1", + IPConfigs: []types.IPConfig{{IPAddress: "10.1.1.6/24", Gateway: "10.1.1.1"}}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -243,8 +240,7 @@ func TestISOInjectorInjectIgnition_Errors(t *testing.T) { Network: []types.NetworkConfigData{ { Name: "eth0", - IPAddress: "10.1.1.9/24", - Gateway: "10.1.1.1", + IPConfigs: []types.IPConfig{{IPAddress: "10.1.1.9/24", Gateway: "10.1.1.1"}}, DNSServers: []string{"10.1.1.1"}, }, }, @@ -297,8 +293,7 @@ func TestISOInjectorInject_Unsupported(t *testing.T) { NetworkRenderer: cloudinit.NewNetworkConfig([]types.NetworkConfigData{ { Name: "eth0", - IPAddress: "10.1.1.6/24", - Gateway: "10.1.1.1", + IPConfigs: []types.IPConfig{{IPAddress: "10.1.1.6/24", Gateway: "10.1.1.1"}}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }), diff --git a/internal/service/scheduler/vmscheduler.go b/internal/service/scheduler/vmscheduler.go index 50ad6557..0c7ed984 100644 --- a/internal/service/scheduler/vmscheduler.go +++ b/internal/service/scheduler/vmscheduler.go @@ -26,7 +26,7 @@ import ( "github.com/go-logr/logr" "sigs.k8s.io/cluster-api/util" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) @@ -82,7 +82,7 @@ func selectNode( sort.Sort(byMemory) - requestedMemory := uint64(machine.Spec.MemoryMiB) * 1024 * 1024 // convert to bytes + requestedMemory := uint64(*machine.Spec.MemoryMiB) * 1024 * 1024 // convert to bytes if requestedMemory > byMemory[0].AvailableMemory { // no more space on the node with the highest amount of available memory return "", InsufficientMemoryError{ @@ -128,7 +128,7 @@ func selectNode( } type resourceClient interface { - 
GetReservableMemoryBytes(context.Context, string, uint64) (uint64, error) + GetReservableMemoryBytes(context.Context, string, int64) (uint64, error) } type nodeInfo struct { diff --git a/internal/service/scheduler/vmscheduler_test.go b/internal/service/scheduler/vmscheduler_test.go index 3251dea1..416fe74c 100644 --- a/internal/service/scheduler/vmscheduler_test.go +++ b/internal/service/scheduler/vmscheduler_test.go @@ -27,11 +27,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" @@ -39,18 +40,18 @@ import ( type fakeResourceClient map[string]uint64 -func (c fakeResourceClient) GetReservableMemoryBytes(_ context.Context, nodeName string, _ uint64) (uint64, error) { +func (c fakeResourceClient) GetReservableMemoryBytes(_ context.Context, nodeName string, _ int64) (uint64, error) { return c[nodeName], nil } -func miBytes(in uint64) uint64 { - return in * 1024 * 1024 +func miBytes(in int32) uint64 { + return uint64(in) * 1024 * 1024 } func TestSelectNode(t *testing.T) { allowedNodes := []string{"pve1", "pve2", "pve3"} var locations []infrav1.NodeLocation - const requestMiB = 8 + var requestMiB = int32(8) availableMem := map[string]uint64{ "pve1": miBytes(20), "pve2": miBytes(30), @@ -67,9 +68,9 @@ func TestSelectNode(t *testing.T) { for i, expectedNode := range expectedNodes { t.Run(fmt.Sprintf("round %d", i+1), func(t *testing.T) { 
proxmoxMachine := &infrav1.ProxmoxMachine{ - Spec: infrav1.ProxmoxMachineSpec{ - MemoryMiB: requestMiB, - }, + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + MemoryMiB: &requestMiB, + }), } client := fakeResourceClient(availableMem) @@ -87,9 +88,9 @@ func TestSelectNode(t *testing.T) { t.Run("out of memory", func(t *testing.T) { proxmoxMachine := &infrav1.ProxmoxMachine{ - Spec: infrav1.ProxmoxMachineSpec{ - MemoryMiB: requestMiB, - }, + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + MemoryMiB: &requestMiB, + }), } client := fakeResourceClient(availableMem) @@ -145,9 +146,9 @@ func TestScheduleVM(t *testing.T) { "cluster.x-k8s.io/cluster-name": "bar", }, }, - Spec: infrav1.ProxmoxMachineSpec{ - MemoryMiB: 10, - }, + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + MemoryMiB: ptr.To(int32(10)), + }), } fakeProxmoxClient := proxmoxtest.NewMockClient(t) @@ -177,9 +178,9 @@ func TestScheduleVM(t *testing.T) { }) require.NoError(t, err) - fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve1", uint64(100)).Return(miBytes(60), nil) - fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve2", uint64(100)).Return(miBytes(20), nil) - fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve3", uint64(100)).Return(miBytes(20), nil) + fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve1", int64(100)).Return(miBytes(60), nil) + fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve2", int64(100)).Return(miBytes(20), nil) + fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve3", int64(100)).Return(miBytes(20), nil) node, err := ScheduleVM(context.Background(), machineScope) require.NoError(t, err) diff --git a/internal/service/taskservice/task.go b/internal/service/taskservice/task.go index 35da8553..ff1af95b 100644 --- a/internal/service/taskservice/task.go +++ b/internal/service/taskservice/task.go @@ -28,7 +28,7 @@ import ( clusterv1 
"sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) @@ -120,18 +120,18 @@ func checkAndRetryTask(scope *scope.MachineScope, task *proxmox.Task) (bool, err } else { errorMessage = "task failed but its exit status is OK; this should not happen" } - conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.TaskFailure, clusterv1.ConditionSeverityInfo, "%s", errorMessage) + conditions.MarkFalse(scope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.TaskFailure, clusterv1.ConditionSeverityInfo, "%s", errorMessage) // Instead of directly requeuing the failed task, wait for the RetryAfter duration to pass // before resetting the taskRef from the ProxmoxMachine status. if scope.ProxmoxMachine.Status.RetryAfter.IsZero() { - scope.ProxmoxMachine.Status.RetryAfter = metav1.Time{Time: time.Now().Add(1 * time.Minute)} + scope.ProxmoxMachine.Status.RetryAfter = &metav1.Time{Time: time.Now().Add(1 * time.Minute)} } else { scope.ProxmoxMachine.Status.TaskRef = nil - scope.ProxmoxMachine.Status.RetryAfter = metav1.Time{} + scope.ProxmoxMachine.Status.RetryAfter = nil } return true, nil default: - return false, NewRequeueError(fmt.Sprintf("unknown task state %q for %q", task.ExitStatus, scope.ProxmoxMachine.Name), infrav1alpha1.DefaultReconcilerRequeue) + return false, NewRequeueError(fmt.Sprintf("unknown task state %q for %q", task.ExitStatus, scope.ProxmoxMachine.Name), infrav1.DefaultReconcilerRequeue) } } diff --git a/internal/service/vmservice/bootstrap.go b/internal/service/vmservice/bootstrap.go index 731f1558..92ac6133 100644 --- a/internal/service/vmservice/bootstrap.go +++ b/internal/service/vmservice/bootstrap.go @@ -18,6 +18,7 @@ package vmservice import ( "context" + 
"encoding/json" "fmt" "strings" @@ -26,9 +27,10 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/inject" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/cloudinit" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/ignition" @@ -37,17 +39,12 @@ import ( ) func reconcileBootstrapData(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { - if ptr.Deref(machineScope.ProxmoxMachine.Status.BootstrapDataProvided, false) { - // skip machine already have the bootstrap data. + if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.WaitingForBootstrapDataReconcilationReason { + // Machine is in the wrong state to reconcile, we only reconcile VMs Waiting for Bootstrap Data reconciliation return false, nil } - if !machineHasIPAddress(machineScope.ProxmoxMachine) { - // skip machine doesn't have an IpAddress yet. - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityWarning, "no ip address") - return true, nil - } - + // TODO: remove. // make sure MacAddress is set. if !vmHasMacAddresses(machineScope) { return true, nil @@ -58,7 +55,7 @@ func reconcileBootstrapData(ctx context.Context, machineScope *scope.MachineScop // Get the bootstrap data.
bootstrapData, format, err := getBootstrapData(ctx, machineScope) if err != nil { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.CloningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err) return false, err } @@ -66,17 +63,15 @@ func reconcileBootstrapData(ctx context.Context, machineScope *scope.MachineScop nicData, err := getNetworkConfigData(ctx, machineScope) if err != nil { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityWarning, "%s", err) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReconcilationReason, clusterv1.ConditionSeverityWarning, "%s", err) return false, err } - kubernetesVersion := "" - if machineScope.Machine.Spec.Version != nil { - kubernetesVersion = *machineScope.Machine.Spec.Version - } + kubernetesVersion := ptr.Deref(machineScope.Machine.Spec.Version, "") machineScope.Logger.V(4).Info("reconciling BootstrapData.", "format", format) + machineScope.Logger.V(4).Info("nicData", "json", func() string { ret, _ := json.Marshal(nicData); return string(ret) }()) // Inject userdata based on the format if ptr.Deref(format, "") == ignition.FormatIgnition { err = injectIgnition(ctx, machineScope, bootstrapData, biosUUID, nicData, kubernetesVersion) @@ -84,10 +79,14 @@ func reconcileBootstrapData(ctx context.Context, machineScope *scope.MachineScop err = injectCloudInit(ctx, machineScope, bootstrapData, biosUUID, nicData, kubernetesVersion) } if err != nil { + // Todo: test this (colliding default gateways for example) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.VMProvisionFailedReason, 
clusterv1.ConditionSeverityWarning, "%s", err) return false, errors.Wrap(err, "failed to inject bootstrap data") } + // Todo: This status field is now superfluous machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForVMPowerUpReason, clusterv1.ConditionSeverityInfo, "") return false, nil } @@ -97,19 +96,15 @@ func injectCloudInit(ctx context.Context, machineScope *scope.MachineScope, boot network := cloudinit.NewNetworkConfig(nicData) // create metadata renderer - metadata := cloudinit.NewMetadata(biosUUID, machineScope.Name(), kubernetesVersion, ptr.Deref(machineScope.ProxmoxMachine.Spec.MetadataSettings, infrav1alpha1.MetadataSettings{ProviderIDInjection: false}).ProviderIDInjection) + metadata := cloudinit.NewMetadata(biosUUID, machineScope.Name(), kubernetesVersion, *ptr.Deref(machineScope.ProxmoxMachine.Spec.MetadataSettings, infrav1.MetadataSettings{ProviderIDInjection: ptr.To(false)}).ProviderIDInjection) injector := getISOInjector(machineScope.VirtualMachine, bootstrapData, metadata, network) - if err := injector.Inject(ctx, inject.CloudConfigFormat); err != nil { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityWarning, "%s", err) - return err - } - return nil + return injector.Inject(ctx, inject.CloudConfigFormat) } func injectIgnition(ctx context.Context, machineScope *scope.MachineScope, bootstrapData []byte, biosUUID string, nicData []types.NetworkConfigData, kubernetesVersion string) error { // create metadata renderer - metadata := cloudinit.NewMetadata(biosUUID, machineScope.Name(), kubernetesVersion, ptr.Deref(machineScope.ProxmoxMachine.Spec.MetadataSettings, infrav1alpha1.MetadataSettings{ProviderIDInjection: false}).ProviderIDInjection) + metadata := cloudinit.NewMetadata(biosUUID, machineScope.Name(), kubernetesVersion, 
*ptr.Deref(machineScope.ProxmoxMachine.Spec.MetadataSettings, infrav1.MetadataSettings{ProviderIDInjection: ptr.To(false)}).ProviderIDInjection) // create an enricher enricher := &ignition.Enricher{ @@ -121,11 +116,7 @@ func injectIgnition(ctx context.Context, machineScope *scope.MachineScope, boots } injector := getIgnitionISOInjector(machineScope.VirtualMachine, metadata, enricher) - if err := injector.Inject(ctx, inject.IgnitionFormat); err != nil { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityWarning, "%s", err) - return err - } - return nil + return injector.Inject(ctx, inject.IgnitionFormat) } type isoInjector interface { @@ -183,20 +174,14 @@ func getBootstrapData(ctx context.Context, scope *scope.MachineScope) ([]byte, * func getNetworkConfigData(ctx context.Context, machineScope *scope.MachineScope) ([]types.NetworkConfigData, error) { // provide a default in case network is not defined - network := ptr.Deref(machineScope.ProxmoxMachine.Spec.Network, infrav1alpha1.NetworkSpec{}) - networkConfigData := make([]types.NetworkConfigData, 0, 1+len(network.AdditionalDevices)+len(network.VRFs)) + network := ptr.Deref(machineScope.ProxmoxMachine.Spec.Network, infrav1.NetworkSpec{}) + networkConfigData := make([]types.NetworkConfigData, 0, len(network.NetworkDevices)+len(network.VRFs)) - defaultConfig, err := getDefaultNetworkDevice(ctx, machineScope) + networkConfig, err := getNetworkDevices(ctx, machineScope, network) if err != nil { return nil, err } - networkConfigData = append(networkConfigData, defaultConfig...) - - additionalConfig, err := getAdditionalNetworkDevices(ctx, machineScope, network) - if err != nil { - return nil, err - } - networkConfigData = append(networkConfigData, additionalConfig...) + networkConfigData = append(networkConfigData, networkConfig...) 
virtualConfig, err := getVirtualNetworkDevices(ctx, machineScope, network, networkConfigData) if err != nil { @@ -207,37 +192,26 @@ func getNetworkConfigData(ctx context.Context, machineScope *scope.MachineScope) return networkConfigData, nil } -func getRoutingData(routes []infrav1alpha1.RouteSpec) *[]types.RoutingData { - routingData := make([]types.RoutingData, 0, len(routes)) - for _, route := range routes { - routeSpec := types.RoutingData{} - routeSpec.To = route.To - routeSpec.Via = route.Via - routeSpec.Metric = route.Metric - routeSpec.Table = route.Table - routingData = append(routingData, routeSpec) +// TODO: make this a thing +func getClusterNetwork(ctx context.Context, machineScope *scope.MachineScope) ([]corev1.TypedLocalObjectReference, error) { + ipv4 := machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config + ipv6 := machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config + + ret := make([]corev1.TypedLocalObjectReference, 0, 2) + if ipv4 != nil { } + if ipv6 != nil { - return &routingData + } + return ret, nil } -func getRoutingPolicyData(rules []infrav1alpha1.RoutingPolicySpec) *[]types.FIBRuleData { - routingPolicyData := make([]types.FIBRuleData, 0, len(rules)) - for _, rule := range rules { - ruleSpec := types.FIBRuleData{} - ruleSpec.To = rule.To - ruleSpec.From = rule.From - ruleSpec.Priority = rule.Priority - if rule.Table != nil { - ruleSpec.Table = *rule.Table - } - routingPolicyData = append(routingPolicyData, ruleSpec) +func getNetworkConfigDataForDevice(ctx context.Context, machineScope *scope.MachineScope, device string, ipPoolRefs map[corev1.TypedLocalObjectReference][]ipamv1.IPAddress) (*types.NetworkConfigData, error) { + if device == "" { + // this should never happen outwith tests + return nil, errors.New("empty device name") } - return &routingPolicyData -} - -func getNetworkConfigDataForDevice(ctx context.Context, machineScope *scope.MachineScope, device string) (*types.NetworkConfigData, error) { nets := 
machineScope.VirtualMachine.VirtualMachineConfig.MergeNets() // For nics supporting multiple IP addresses, we need to cut the '-inet' or '-inet6' part, // to retrieve the correct MAC address. @@ -247,142 +221,90 @@ func getNetworkConfigDataForDevice(ctx context.Context, machineScope *scope.Mach machineScope.Logger.Error(errors.New("unable to extract mac address"), "device has no mac address", "device", device) return nil, errors.New("unable to extract mac address") } - // retrieve IPAddress. - ipAddr, err := findIPAddress(ctx, machineScope, device) - if err != nil { - return nil, errors.Wrapf(err, "unable to find IPAddress, device=%s", device) + + var ipConfigs []types.IPConfig + for _, addresses := range ipPoolRefs { + ipConfig := types.IPConfig{} + for _, ipAddr := range addresses { + // TODO: IPConfigs is stupid. No need to gather metrics here. + ipConfig.IPAddress = IPAddressWithPrefix(ipAddr.Spec.Address, ipAddr.Spec.Prefix) + ipConfig.Gateway = ipAddr.Spec.Gateway + + metric, err := findIPAddressGatewayMetric(ctx, machineScope, &ipAddr) + if err != nil { + return nil, errors.Wrapf(err, "error converting metric annotation, kind=%s, name=%s", ipAddr.Spec.PoolRef.Kind, ipAddr.Spec.PoolRef.Name) + } + ipConfig.Metric = metric + } + ipConfigs = append(ipConfigs, ipConfig) } dns := machineScope.InfraCluster.ProxmoxCluster.Spec.DNSServers - ip := IPAddressWithPrefix(ipAddr.Spec.Address, ipAddr.Spec.Prefix) - gw := ipAddr.Spec.Gateway - metric, err := findIPAddressGatewayMetric(ctx, machineScope, ipAddr) - if err != nil { - return nil, errors.Wrapf(err, "error converting metric annotation, kind=%s, name=%s", ipAddr.Spec.PoolRef.Kind, ipAddr.Spec.PoolRef.Name) - } cloudinitNetworkConfigData := &types.NetworkConfigData{ + IPConfigs: ipConfigs, MacAddress: macAddress, DNSServers: dns, } - // If it's an IPv6 address, we must set Gateway6 and IPV6Address instead - if strings.Contains(ip, ":") { - cloudinitNetworkConfigData.Gateway6 = gw - 
cloudinitNetworkConfigData.Metric6 = metric - cloudinitNetworkConfigData.IPV6Address = ip - } else { - cloudinitNetworkConfigData.Gateway = gw - cloudinitNetworkConfigData.Metric = metric - cloudinitNetworkConfigData.IPAddress = ip - } - return cloudinitNetworkConfigData, nil } -func getDefaultNetworkDevice(ctx context.Context, machineScope *scope.MachineScope) ([]types.NetworkConfigData, error) { - var config types.NetworkConfigData - - // default network device ipv4. - if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil || - (machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef != nil) { - conf, err := getNetworkConfigDataForDevice(ctx, machineScope, DefaultNetworkDeviceIPV4) - if err != nil { - return nil, errors.Wrapf(err, "unable to get network config data for device=%s", DefaultNetworkDeviceIPV4) - } - if machineScope.ProxmoxMachine.Spec.Network != nil && len(machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers) != 0 { - config.DNSServers = machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers - } - config = *conf +// getCommonInterfaceConfig sets data which is common to all types of network interfaces. +func getCommonInterfaceConfig(_ context.Context, _ *scope.MachineScope, ciconfig *types.NetworkConfigData, ifconfig infrav1.InterfaceConfig) { + if len(ifconfig.DNSServers) != 0 { + ciconfig.DNSServers = ifconfig.DNSServers } + ciconfig.Routes = ifconfig.Routing.Routes + ciconfig.FIBRules = ifconfig.Routing.RoutingPolicy + ciconfig.LinkMTU = ifconfig.LinkMTU +} - // default network device ipv6. 
- if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil || - (machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef != nil) { - conf, err := getNetworkConfigDataForDevice(ctx, machineScope, DefaultNetworkDeviceIPV6) - if err != nil { - return nil, errors.Wrapf(err, "unable to get network config data for device=%s", DefaultNetworkDeviceIPV6) - } - - switch { - case len(config.MacAddress) == 0: - config = *conf - case config.MacAddress != conf.MacAddress: - return nil, errors.New("default network device ipv4 and ipv6 have different mac addresses") - default: - config.IPV6Address = conf.IPV6Address - config.Gateway6 = conf.Gateway6 - config.Metric6 = conf.Metric6 - } - - if machineScope.ProxmoxMachine.Spec.Network != nil && len(machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers) != 0 { - config.DNSServers = machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers - } - } +func getNetworkDevices(ctx context.Context, machineScope *scope.MachineScope, network infrav1.NetworkSpec) ([]types.NetworkConfigData, error) { + networkConfigData := make([]types.NetworkConfigData, 0, len(network.NetworkDevices)) + ipAddressMap := make(map[string]map[corev1.TypedLocalObjectReference][]ipamv1.IPAddress) - // Default Network Device lacks a datastructure to transport MTU. - // We can use the Proxmox Device MTU instead to enable non virtio devices - // the usage of jumbo frames. This has the minor drawback of coalescing proxmox - // MTU with interface MTU, which shouldn't matter in almost all cases. - if network := machineScope.ProxmoxMachine.Spec.Network; network != nil { - if network.Default != nil { - if network.Default.MTU != nil && *network.Default.MTU >= 576 { - config.LinkMTU = network.Default.MTU - } - } + requeue, err := handleDevices(ctx, machineScope, ipAddressMap) + if requeue || err != nil { + // invalid state. 
Machine should've had all IPs assigned + return nil, errors.Wrapf(err, "unable to get IPs for network config data") } - config.Name = "eth0" - config.Type = "ethernet" - config.ProxName = "net0" + // network devices. + for i, nic := range network.NetworkDevices { + var config = ptr.To(types.NetworkConfigData{}) - return []types.NetworkConfigData{config}, nil -} + // TODO: Default device IPPool api change + ipPoolRefs := ipAddressMap[*nic.Name] -func getCommonInterfaceConfig(ctx context.Context, machineScope *scope.MachineScope, ciconfig *types.NetworkConfigData, nic infrav1alpha1.AdditionalNetworkDevice) error { - if len(nic.DNSServers) != 0 { - ciconfig.DNSServers = nic.DNSServers - } - ciconfig.Routes = *getRoutingData(nic.InterfaceConfig.Routing.Routes) - ciconfig.FIBRules = *getRoutingPolicyData(nic.InterfaceConfig.Routing.RoutingPolicy) - ciconfig.LinkMTU = nic.InterfaceConfig.LinkMTU - - // Only set IPAddresses if they haven't been set yet - if ippool := nic.NetworkDevice.IPv4PoolRef; ippool != nil && ciconfig.IPAddress == "" { - // retrieve IPAddress. 
- var ifname = fmt.Sprintf("%s-%s", ciconfig.Name, infrav1alpha1.DefaultSuffix) - ipAddr, err := findIPAddress(ctx, machineScope, ifname) + conf, err := getNetworkConfigDataForDevice(ctx, machineScope, *nic.Name, ipPoolRefs) if err != nil { - return errors.Wrapf(err, "unable to find IPAddress, device=%s", ifname) + return nil, errors.Wrapf(err, "unable to get network config data for device=%s", *nic.Name) } - metric, err := findIPAddressGatewayMetric(ctx, machineScope, ipAddr) - if err != nil { - return errors.Wrapf(err, "error converting metric annotation, kind=%s, name=%s", ipAddr.Spec.PoolRef.Kind, ipAddr.Spec.PoolRef.Name) + if len(nic.DNSServers) != 0 { + config.DNSServers = nic.DNSServers } + config = conf - ciconfig.IPAddress = IPAddressWithPrefix(ipAddr.Spec.Address, ipAddr.Spec.Prefix) - ciconfig.Gateway = ipAddr.Spec.Gateway - ciconfig.Metric = metric - } - if nic.NetworkDevice.IPv6PoolRef != nil && ciconfig.IPV6Address == "" { - var ifname = fmt.Sprintf("%s-%s", ciconfig.Name, infrav1alpha1.DefaultSuffix+"6") - ipAddr, err := findIPAddress(ctx, machineScope, ifname) - if err != nil { - return errors.Wrapf(err, "unable to find IPAddress, device=%s", ifname) + getCommonInterfaceConfig(ctx, machineScope, config, nic.InterfaceConfig) + + config.Name = fmt.Sprintf("eth%d", i) + config.Type = "ethernet" + config.ProxName = nic.Name + + // TODO: Figure device names for eth0 + if i == 0 { + config.ProxName = ptr.To("net0") } - metric, err := findIPAddressGatewayMetric(ctx, machineScope, ipAddr) - if err != nil { - return errors.Wrapf(err, "error converting metric annotation, kind=%s, name=%s", ipAddr.Spec.PoolRef.Kind, ipAddr.Spec.PoolRef.Name) + + if len(config.MacAddress) > 0 { + networkConfigData = append(networkConfigData, *config) } - ciconfig.IPV6Address = IPAddressWithPrefix(ipAddr.Spec.Address, ipAddr.Spec.Prefix) - ciconfig.Gateway6 = ipAddr.Spec.Gateway - ciconfig.Metric6 = metric } - - return nil + return networkConfigData, nil } -func 
getVirtualNetworkDevices(_ context.Context, _ *scope.MachineScope, network infrav1alpha1.NetworkSpec, data []types.NetworkConfigData) ([]types.NetworkConfigData, error) { +func getVirtualNetworkDevices(_ context.Context, _ *scope.MachineScope, network infrav1.NetworkSpec, data []types.NetworkConfigData) ([]types.NetworkConfigData, error) { networkConfigData := make([]types.NetworkConfigData, 0, len(network.VRFs)) for _, device := range network.VRFs { @@ -393,75 +315,22 @@ func getVirtualNetworkDevices(_ context.Context, _ *scope.MachineScope, network for i, child := range device.Interfaces { for _, net := range data { - if (net.Name == child) || (net.ProxName == child) { + if (net.Name == *child) || (ptr.Deref(net.ProxName, "") == *child) { config.Interfaces = append(config.Interfaces, net.Name) } } if len(config.Interfaces)-1 < i { - return nil, errors.Errorf("unable to find vrf interface=%s child interface %s", config.Name, child) + return nil, errors.Errorf("unable to find vrf interface=%s child interface %s", config.Name, *child) } } - config.Routes = *getRoutingData(device.Routing.Routes) - config.FIBRules = *getRoutingPolicyData(device.Routing.RoutingPolicy) + config.Routes = device.Routing.Routes + config.FIBRules = device.Routing.RoutingPolicy networkConfigData = append(networkConfigData, *config) } return networkConfigData, nil } -func getAdditionalNetworkDevices(ctx context.Context, machineScope *scope.MachineScope, network infrav1alpha1.NetworkSpec) ([]types.NetworkConfigData, error) { - networkConfigData := make([]types.NetworkConfigData, 0, len(network.AdditionalDevices)) - - // additional network devices append after the provisioning interface - var index = 1 - // additional network devices. 
- for _, nic := range network.AdditionalDevices { - var config = ptr.To(types.NetworkConfigData{}) - - if nic.IPv4PoolRef != nil { - device := fmt.Sprintf("%s-%s", nic.Name, infrav1alpha1.DefaultSuffix) - conf, err := getNetworkConfigDataForDevice(ctx, machineScope, device) - if err != nil { - return nil, errors.Wrapf(err, "unable to get network config data for device=%s", device) - } - if len(nic.DNSServers) != 0 { - config.DNSServers = nic.DNSServers - } - config = conf - } - - if nic.IPv6PoolRef != nil { - suffix := infrav1alpha1.DefaultSuffix + "6" - device := fmt.Sprintf("%s-%s", nic.Name, suffix) - conf, err := getNetworkConfigDataForDevice(ctx, machineScope, device) - if err != nil { - return nil, errors.Wrapf(err, "unable to get network config data for device=%s", device) - } - if len(nic.DNSServers) != 0 { - config.DNSServers = nic.DNSServers - } - - config.IPV6Address = conf.IPV6Address - config.Gateway6 = conf.Gateway6 - } - - err := getCommonInterfaceConfig(ctx, machineScope, config, nic) - if err != nil { - return nil, errors.Wrapf(err, "unable to get network config data for device=%s", nic.Name) - } - - config.Name = fmt.Sprintf("eth%d", index) - index++ - config.Type = "ethernet" - config.ProxName = nic.Name - - if len(config.MacAddress) > 0 { - networkConfigData = append(networkConfigData, *config) - } - } - return networkConfigData, nil -} - func vmHasMacAddresses(machineScope *scope.MachineScope) bool { nets := machineScope.VirtualMachine.VirtualMachineConfig.MergeNets() if len(nets) == 0 { diff --git a/internal/service/vmservice/bootstrap_test.go b/internal/service/vmservice/bootstrap_test.go index cf385e0d..93f0ae1e 100644 --- a/internal/service/vmservice/bootstrap_test.go +++ b/internal/service/vmservice/bootstrap_test.go @@ -28,9 +28,12 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/util/conditions" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + ipamicv1 
"sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/inject" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/cloudinit" + . "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/ignition" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/types" @@ -40,89 +43,171 @@ const ( biosUUID = "uuid=41ec1197-580f-460b-b41b-1dfefabe6e32" ) -func TestReconcileBootstrapData_MissingIPAddress(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) - - requeue, err := reconcileBootstrapData(context.Background(), machineScope) - require.NoError(t, err) - require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +var defaultNic = infrav1.NetworkDevice{ + Bridge: ptr.To("vmbr0"), + Model: ptr.To("virtio"), + Name: ptr.To(infrav1.DefaultNetworkDevice), } -func TestReconcileBootstrapData_MissingMACAddress(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) - machineScope.SetVirtualMachine(newStoppedVM()) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} +func setupFakeIsoInjector(t *testing.T) *[]byte { + networkData := new([]byte) + getISOInjector = func(vm *proxmox.VirtualMachine, bootstrapData []byte, metadata, network cloudinit.Renderer) isoInjector { + *networkData, _ = network.Inspect() + return FakeISOInjector{ + VirtualMachine: vm, + BootstrapData: bootstrapData, + MetaData: metadata, + Network: network, + } + } + t.Cleanup(func() { getISOInjector = defaultISOInjector }) - requeue, err := reconcileBootstrapData(context.Background(), machineScope) - require.NoError(t, err) - require.True(t, requeue) - 
require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + return networkData } -func TestReconcileBootstrapData_NoNetworkConfig_UpdateStatus(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { - return FakeISOInjector{} +func setupVMWithMetadata(t *testing.T, machineScope *scope.MachineScope, netSpecs ...string) *proxmox.VirtualMachine { + if len(netSpecs) == 0 { + netSpecs = []string{"virtio=A6:23:64:4D:84:CB,bridge=vmbr0"} } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) - - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") + vm := newVMWithNets(netSpecs[0], netSpecs[1:]...) vm.VirtualMachineConfig.SMBios1 = biosUUID machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") + + return vm +} + +// TestReconcileBootsrapData_NoNetworkConfig_UpdateStatus tests the simplest setup +// with only a default network pool being reconciled correctly. 
+func TestReconcileBootstrapData_NoNetworkConfig_UpdateStatus(t *testing.T) { + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + + // setup bootstrapdata injection fake + networkDataPtr := setupFakeIsoInjector(t) + + // setup VM with all metadata + setupVMWithMetadata(t, machineScope) createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) + // NetworkSetup + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + defaultNic, + }, + } + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + + // reconcile BootstrapData requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.NoError(t, err) require.False(t, requeue) - require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + + // Check generated bootstrapData against setup + networkConfigData := getNetworkConfigDataFromVM(t, *networkDataPtr) + require.True(t, len(networkConfigData) == 1) + require.Equal(t, "10.10.10.10"+"/24", networkConfigData[0].IPConfigs[0].IPAddress) + require.Equal(t, "A6:23:64:4D:84:CB", networkConfigData[0].MacAddress) + require.Equal(t, "eth0", networkConfigData[0].Name) + require.Equal(t, "net0", *networkConfigData[0].ProxName) + require.Equal(t, "ethernet", networkConfigData[0].Type) + + require.Equal(t, infrav1.WaitingForVMPowerUpReason, conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided) } +// TestReconcileBootstrapData_UpdateStatus. 
func TestReconcileBootstrapData_UpdateStatus(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{ - Bridge: "vmbr0", - Model: ptr.To("virtio"), - }, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + + // setup bootstrapdata injection fake + networkDataPtr := setupFakeIsoInjector(t) + + // NetworkSetup + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extraPool0", + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + defaultNic, { - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), DNSServers: []string{"1.2.3.4"}}, - Name: "net1", - InterfaceConfig: infrav1alpha1.InterfaceConfig{}, + Bridge: ptr.To("vmbr1"), + Model: ptr.To("virtio"), + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + DNSServers: []string{"1.2.3.4"}, + IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}, + }, }, }, } - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}, "net1": {IPV4: "10.100.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10") + + 
createIPPools(t, kubeClient, machineScope) + + // update extraPool for gateway/prefix test + poolObj := getIPAddressPool(t, kubeClient, machineScope, &extraPool0) + poolObj.(*ipamicv1.GlobalInClusterIPPool).Spec.Prefix = 16 + poolObj.(*ipamicv1.GlobalInClusterIPPool).Spec.Gateway = "10.100.10.1" + createOrUpdateIPPool(t, kubeClient, machineScope, nil, poolObj) + + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10", &extraPool0) + + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { - return FakeISOInjector{} - } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) + // reconcile BootstrapData requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.NoError(t, err) require.False(t, requeue) - require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + + // Check generated bootstrapData against setup + networkConfigData := getNetworkConfigDataFromVM(t, *networkDataPtr) + require.True(t, len(networkConfigData) > 0) + require.Equal(t, "10.10.10.10"+"/24", networkConfigData[0].IPConfigs[0].IPAddress) + require.Equal(t, "A6:23:64:4D:84:CB", networkConfigData[0].MacAddress) + require.Equal(t, "eth0", networkConfigData[0].Name) + require.Equal(t, "net0", *networkConfigData[0].ProxName) + require.Equal(t, "ethernet", networkConfigData[0].Type) + require.Equal(t, "10.100.10.10"+"/16", networkConfigData[1].IPConfigs[0].IPAddress) + require.Equal(t, "10.100.10.1", networkConfigData[1].IPConfigs[0].Gateway) + require.Equal(t, "AA:23:64:4D:84:CD", networkConfigData[1].MacAddress) + require.Equal(t, "eth1", 
networkConfigData[1].Name) + require.Equal(t, "net1", *networkConfigData[1].ProxName) + require.Equal(t, "ethernet", networkConfigData[1].Type) + + require.Equal(t, infrav1.WaitingForVMPowerUpReason, conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided) } +// TestReconcileBootstrapData_BadInjector is supposed to fail when rendering VM configuration data. func TestReconcileBootstrapData_BadInjector(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) + // NetworkSetup + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + defaultNic, + }, + } + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { return FakeISOInjector{Error: errors.New("bad FakeISOInjector")} } @@ -132,12 +217,12 @@ func TestReconcileBootstrapData_BadInjector(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "failed to inject bootstrap data: bad FakeISOInjector") require.False(t, requeue) - 
require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.Nil(t, machineScope.ProxmoxMachine.Status.BootstrapDataProvided) } func TestGetBootstrapData_MissingSecretName(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) data, _, err := getBootstrapData(context.Background(), machineScope) require.Error(t, err) @@ -146,7 +231,7 @@ func TestGetBootstrapData_MissingSecretName(t *testing.T) { } func TestGetBootstrapData_MissingSecretNotName(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) machineScope.Machine.Spec.Bootstrap.DataSecretName = ptr.To("foo") data, _, err := getBootstrapData(context.Background(), machineScope) @@ -157,7 +242,7 @@ func TestGetBootstrapData_MissingSecretNotName(t *testing.T) { } func TestGetBootstrapData_MissingSecretValue(t *testing.T) { - machineScope, _, client := setupReconcilerTest(t) + machineScope, _, client := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) machineScope.Machine.Spec.Bootstrap.DataSecretName = ptr.To(machineScope.Name()) // missing format @@ -187,97 +272,95 @@ func TestGetBootstrapData_MissingSecretValue(t *testing.T) { } func TestGetNetworkConfigDataForDevice_MissingIPAddress(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") machineScope.SetVirtualMachine(vm) - cfg, err := getNetworkConfigDataForDevice(context.Background(), machineScope, "net0") - require.Error(t, err) - require.Nil(t, cfg) + cfg, err := 
getNetworkConfigDataForDevice(context.Background(), machineScope, "net0", nil) + require.NoError(t, err) + require.Equal(t, cfg.MacAddress, "A6:23:64:4D:84:CB") + require.Len(t, cfg.IPConfigs, 0) } func TestGetNetworkConfigDataForDevice_MissingMACAddress(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) machineScope.SetVirtualMachine(newStoppedVM()) - cfg, err := getNetworkConfigDataForDevice(context.Background(), machineScope, "net2") + cfg, err := getNetworkConfigDataForDevice(context.Background(), machineScope, "net2", nil) require.Error(t, err) + require.Equal(t, "unable to extract mac address", err.Error()) require.Nil(t, cfg) } func TestGetCommonInterfaceConfig_MissingIPPool(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + defaultNic, { - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), IPPoolConfig: infrav1alpha1.IPPoolConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ + Bridge: ptr.To("vmbr1"), + Model: ptr.To("virtio"), + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{{ APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool", + Kind: GlobalInClusterIPPool, Name: "net1-inet", - }, - }}, - Name: "net1", - InterfaceConfig: infrav1alpha1.InterfaceConfig{}, + }}, + }, }, }, } cfg := &types.NetworkConfigData{Name: "net1"} - err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, 
machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0]) - require.Error(t, err) + getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.NetworkDevices[0].InterfaceConfig) + // Check that no IP config has been assigned even in the presence of an IPPoolRef. + require.Len(t, cfg.IPConfigs, 0) } +/* Test makes no sense, getCommonInterfaceConfig no longer assings addresses func TestGetCommonInterfaceConfig_NoIPAddresses(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ { - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")}, - Name: "net1", + Bridge: ptr.To("vmbr1"), + Model: ptr.To("virtio"), + Name: ptr.To("net1"), }, }, } cfg := &types.NetworkConfigData{Name: "net1"} - err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0]) - require.NoError(t, err) + getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.NetworkDevices[0].InterfaceConfig) + // Check that no IP config has been assigned. 
+ require.Len(t, cfg.IPConfigs, 0) } +*/ func TestGetCommonInterfaceConfig(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) - var MTU uint16 = 9000 - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ + var MTU int32 = 9000 + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ { - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), - IPPoolConfig: infrav1alpha1.IPPoolConfig{ - IPv6PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool", - Name: "net1-inet6", - }, - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool", - Name: "net1-inet", - }, - }, - DNSServers: []string{"1.2.3.4"}}, - Name: "net1", - InterfaceConfig: infrav1alpha1.InterfaceConfig{ - LinkMTU: &MTU, - Routing: infrav1alpha1.Routing{ - Routes: []infrav1alpha1.RouteSpec{ - {To: "default", Via: "192.168.178.1"}, - {To: "172.24.16.0/24", Via: "192.168.178.1", Table: 100}, + Bridge: ptr.To("vmbr1"), + Model: ptr.To("virtio"), + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + DNSServers: []string{"1.2.3.4"}, + LinkMTU: &MTU, + Routing: infrav1.Routing{ + Routes: []infrav1.RouteSpec{ + {To: ptr.To("default"), Via: ptr.To("192.168.178.1")}, + {To: ptr.To("172.24.16.0/24"), Via: ptr.To("192.168.178.1"), Table: ptr.To(int32(100))}, }, - RoutingPolicy: []infrav1alpha1.RoutingPolicySpec{ - {To: "10.10.10.0/24", Table: ptr.To(uint32(100))}, - {From: "172.24.16.0/24", Table: ptr.To(uint32(100))}, + RoutingPolicy: []infrav1.RoutingPolicySpec{ + {To: ptr.To("10.10.10.0/24"), Table: ptr.To(int32(100))}, + {From: ptr.To("172.24.16.0/24"), Table: 
ptr.To(int32(100))}, }, }, }, @@ -285,36 +368,25 @@ func TestGetCommonInterfaceConfig(t *testing.T) { }, } - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10", IPV6: "2001:db8::2"}, "net1": {IPV4: "10.0.0.10", IPV6: "2001:db8::9"}} - createIPPools(t, kubeClient, machineScope) - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.0.0.10") - createIP6AddressResource(t, kubeClient, machineScope, "net1", "2001:db8::9") - cfg := &types.NetworkConfigData{Name: "net1"} - err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0]) - require.Equal(t, "10.0.0.10/24", cfg.IPAddress) - require.Equal(t, "2001:db8::9/64", cfg.IPV6Address) + getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.NetworkDevices[0].InterfaceConfig) require.Equal(t, "1.2.3.4", cfg.DNSServers[0]) - require.Equal(t, "default", cfg.Routes[0].To) - require.Equal(t, "172.24.16.0/24", cfg.Routes[1].To) - require.Equal(t, "10.10.10.0/24", cfg.FIBRules[0].To) - require.Equal(t, "172.24.16.0/24", cfg.FIBRules[1].From) - require.NoError(t, err) + require.Equal(t, "default", *cfg.Routes[0].To) + require.Equal(t, "172.24.16.0/24", *cfg.Routes[1].To) + require.Equal(t, "10.10.10.0/24", *cfg.FIBRules[0].To) + require.Equal(t, "172.24.16.0/24", *cfg.FIBRules[1].From) } func TestGetVirtualNetworkDevices_VRFDevice_MissingInterface(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) machineScope.SetVirtualMachine(newStoppedVM()) - networkSpec := infrav1alpha1.NetworkSpec{ - 
VirtualNetworkDevices: infrav1alpha1.VirtualNetworkDevices{ - VRFs: []infrav1alpha1.VRFDevice{{ + networkSpec := infrav1.NetworkSpec{ + VirtualNetworkDevices: infrav1.VirtualNetworkDevices{ + VRFs: []infrav1.VRFDevice{{ Name: "vrf-blue", Table: 500, - Interfaces: []string{"net1"}, + Interfaces: []infrav1.NetName{ptr.To("net1")}, }}, }, } @@ -322,140 +394,236 @@ func TestGetVirtualNetworkDevices_VRFDevice_MissingInterface(t *testing.T) { cfg, err := getVirtualNetworkDevices(context.Background(), machineScope, networkSpec, networkConfigData) require.Error(t, err) + require.Equal(t, "unable to find vrf interface=vrf-blue child interface net1", err.Error()) require.Nil(t, cfg) } func TestReconcileBootstrapData_DualStack(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1alpha1.IPConfigSpec{ - Addresses: []string{"2001:db8::/64"}, - Prefix: 64, + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") + networkDataPtr := setupFakeIsoInjector(t) + createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) + + proxmoxCluster := machineScope.InfraCluster.ProxmoxCluster + proxmoxCluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ + Addresses: []string{"2001:db8::/96"}, + Prefix: 96, Gateway: "2001:db8::1", } + require.NoError(t, kubeClient.Update(context.Background(), proxmoxCluster)) + proxmoxCluster.Status.InClusterIPPoolRef = []corev1.LocalObjectReference{ + {Name: "test-v4-icip"}, + {Name: "test-v6-icip"}, + } + require.NoError(t, kubeClient.Status().Update(context.Background(), proxmoxCluster)) - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - 
machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10", IPV6: "2001:db8::2"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP6AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "2001:db8::2") - - createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { - return FakeISOInjector{} + // NetworkSetup for default pools + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + defaultPoolV6 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[1].Name, + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{defaultNic}, } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) + // create missing defaultPoolV6 and ipAddresses + require.NoError(t, machineScope.IPAMHelper.CreateOrUpdateInClusterIPPool(context.Background())) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.0.0.254", &defaultPool) + createIPv6AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "2001:db8::2", &defaultPoolV6) + + // perform test requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.NoError(t, err) require.False(t, requeue) - require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + require.Equal(t, infrav1.WaitingForVMPowerUpReason, conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided) + + 
// Test if generated data is equal + networkConfigData := getNetworkConfigDataFromVM(t, *networkDataPtr) + require.True(t, len(networkConfigData) == 1) + require.True(t, len(networkConfigData[0].IPConfigs) == 2) + ipConfigs := networkConfigData[0].IPConfigs + require.Equal(t, "10.0.0.1", ipConfigs[0].Gateway) + require.Equal(t, "10.0.0.254/24", ipConfigs[0].IPAddress) + require.Equal(t, "2001:db8::1", ipConfigs[1].Gateway) + require.Equal(t, "2001:db8::2/96", ipConfigs[1].IPAddress) } func TestReconcileBootstrapData_DualStack_AdditionalDevices(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1alpha1.IPConfigSpec{ - Addresses: []string{"2001:db8::/64"}, - Prefix: 64, + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") + networkDataPtr := setupFakeIsoInjector(t) + createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) + + proxmoxCluster := machineScope.InfraCluster.ProxmoxCluster + proxmoxCluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ + Addresses: []string{"2001:db8::/96"}, + Prefix: 96, Gateway: "2001:db8::1", } + require.NoError(t, kubeClient.Update(context.Background(), proxmoxCluster)) + proxmoxCluster.Status.InClusterIPPoolRef = []corev1.LocalObjectReference{ + {Name: "test-v4-icip"}, + {Name: "test-v6-icip"}, + } + require.NoError(t, kubeClient.Status().Update(context.Background(), proxmoxCluster)) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{ - Bridge: "vmbr0", - Model: ptr.To("virtio"), - }, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ + // NetworkSetup for default pools + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: 
InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + defaultPoolV6 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[1].Name, + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{defaultNic}, + } + + // create missing defaultPoolV6 + require.NoError(t, machineScope.IPAMHelper.CreateOrUpdateInClusterIPPool(context.Background())) + + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool0", + } + extraPool1 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool1", + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + defaultNic, { - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), - IPPoolConfig: infrav1alpha1.IPPoolConfig{ - IPv6PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool", - Name: "sample", - }, - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "InClusterIPPool", - Name: "sample", - }, + Bridge: ptr.To("vmbr1"), + Model: ptr.To("virtio"), + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + DNSServers: []string{"1.2.3.4"}, + IPPoolRef: []corev1.TypedLocalObjectReference{ + extraPool0, + extraPool1, }, - DNSServers: []string{"1.2.3.4"}}, - Name: "net1", - InterfaceConfig: infrav1alpha1.InterfaceConfig{}, + }, }, }, } - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = 
map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10", IPV6: "2001:db8::2"}, "net1": {IPV4: "10.0.0.10", IPV6: "2001:db8::9"}} + // Create missing ip addresses and pools. createIPPools(t, kubeClient, machineScope) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP6AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "2001:db8::2") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.0.0.10") - createIP6AddressResource(t, kubeClient, machineScope, "net1", "2001:db8::9") - createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { - return FakeISOInjector{} - } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv6AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "2001:db8::2", &defaultPoolV6) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", "10.0.0.10", &extraPool0) + createIPv6AddressResource(t, kubeClient, machineScope, "net1", "2001:db8::9", &extraPool1) + // Perform test. requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.NoError(t, err) require.False(t, requeue) - require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + require.Equal(t, infrav1.WaitingForVMPowerUpReason, conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided) + + // Test if generated data is equal. 
+ networkConfigData := getNetworkConfigDataFromVM(t, *networkDataPtr) + require.True(t, len(networkConfigData) == 2) + require.True(t, len(networkConfigData[0].IPConfigs) == 2) + require.True(t, len(networkConfigData[1].IPConfigs) == 2) + ipConfigs := networkConfigData[0].IPConfigs + require.Equal(t, "10.0.0.1", ipConfigs[0].Gateway) + require.Equal(t, "10.10.10.10/24", ipConfigs[0].IPAddress) + require.Equal(t, "2001:db8::1", ipConfigs[1].Gateway) + require.Equal(t, "2001:db8::2/96", ipConfigs[1].IPAddress) + ipConfigs = networkConfigData[1].IPConfigs + require.Equal(t, "", ipConfigs[0].Gateway) // No Gateway assigned + require.Equal(t, "10.0.0.10/0", ipConfigs[0].IPAddress) + require.Equal(t, "", ipConfigs[1].Gateway) // No Gateway assigned + require.Equal(t, "2001:db8::9/0", ipConfigs[1].IPAddress) } func TestReconcileBootstrapData_VirtualDevices_VRF(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") + networkDataPtr := setupFakeIsoInjector(t) + createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) + + // NetworkSetup for default pools + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[0].Name, + } - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0", Model: ptr.To("virtio")}, - VirtualNetworkDevices: infrav1alpha1.VirtualNetworkDevices{ - VRFs: []infrav1alpha1.VRFDevice{{ - Interfaces: []string{"net1"}, + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool0", + } + extraPool1 := 
corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool1", + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + VirtualNetworkDevices: infrav1.VirtualNetworkDevices{ + VRFs: []infrav1.VRFDevice{{ + Interfaces: []infrav1.NetName{ptr.To("net1")}, Name: "vrf-blue", Table: 500, }}, }, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ + NetworkDevices: []infrav1.NetworkDevice{ { - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), - IPPoolConfig: infrav1alpha1.IPPoolConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "InClusterIPPool", - Name: "sample", - }}, + Bridge: ptr.To("vmbr0"), + Model: ptr.To("virtio"), + Name: ptr.To("net0"), + InterfaceConfig: infrav1.InterfaceConfig{ DNSServers: []string{"1.2.3.4"}, + IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}, + }, + }, + { + Bridge: ptr.To("vmbr1"), + Model: ptr.To("virtio"), + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + DNSServers: []string{"1.2.3.4"}, + IPPoolRef: []corev1.TypedLocalObjectReference{extraPool1}, }, - Name: "net1", - InterfaceConfig: infrav1alpha1.InterfaceConfig{}, }, }, } - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0", "virtio=AA:23:64:4D:84:CD,bridge=vmbr1") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}, "net1": {IPV4: "10.100.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10") - createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ 
cloudinit.Renderer) isoInjector { - return FakeISOInjector{} - } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) + createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.20.10.10", &extraPool0) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10", &extraPool1) requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.NoError(t, err) require.False(t, requeue) - require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided) + + // Test if generated data is equal. + networkConfigData := getNetworkConfigDataFromVM(t, *networkDataPtr) + require.True(t, len(networkConfigData) == 3) + require.True(t, len(networkConfigData[0].IPConfigs) == 2) + require.True(t, len(networkConfigData[1].IPConfigs) == 1) + require.True(t, len(networkConfigData[2].IPConfigs) == 0) + require.True(t, len(networkConfigData[2].Interfaces) == 1) + ipConfigs := networkConfigData[0].IPConfigs + require.Equal(t, "10.0.0.1", ipConfigs[0].Gateway) + require.Equal(t, "10.10.10.10/24", ipConfigs[0].IPAddress) + require.Equal(t, "10.20.10.10/0", ipConfigs[1].IPAddress) + ipConfigs = networkConfigData[1].IPConfigs + require.Equal(t, "10.100.10.10/0", ipConfigs[0].IPAddress) + // VRF Data + require.Equal(t, "vrf", networkConfigData[2].Type) + require.Equal(t, "vrf-blue", networkConfigData[2].Name) + require.Equal(t, "eth1", networkConfigData[2].Interfaces[0]) + require.Equal(t, int32(500), networkConfigData[2].Table) } func TestVMHasMacAddress(t *testing.T) { @@ -468,54 +636,40 @@ func TestVMHasMacAddress(t *testing.T) { } func 
TestReconcileBootstrapDataMissingSecret(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0") - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", nil) requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.Error(t, err) require.False(t, requeue) - require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) - require.True(t, conditions.IsFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) - require.True(t, conditions.GetReason(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) == infrav1alpha1.CloningFailedReason) + require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + require.True(t, conditions.IsFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + require.True(t, conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) == infrav1.CloningFailedReason) } func TestReconcileBootstrapDataMissingNetworkConfig(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - - machineScope.ProxmoxMachine.Status.IPAddresses = 
map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0") createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.Error(t, err) require.False(t, requeue) - require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) - require.True(t, conditions.IsFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) - require.True(t, conditions.GetReason(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) == infrav1alpha1.WaitingForStaticIPAllocationReason) + require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + require.True(t, conditions.IsFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + require.Equal(t, infrav1.VMProvisionFailedReason, conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + require.ErrorContains(t, err, "network config data is not set") } func TestReconcileBootstrapData_Format_CloudConfig(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0") 
createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) - machineScope.SetVirtualMachine(vm) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", nil) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { - return FakeISOInjector{} - } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) + setupFakeIsoInjector(t) // test defaulting of format to cloud-config requeue, err := reconcileBootstrapData(context.Background(), machineScope) @@ -530,15 +684,11 @@ func TestReconcileBootstrapData_Format_CloudConfig(t *testing.T) { } func TestReconcileBootstrapData_Format_Ignition(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0") createBootstrapSecret(t, kubeClient, machineScope, ignition.FormatIgnition) - machineScope.SetVirtualMachine(vm) + + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", nil) getIgnitionISOInjector = func(_ *proxmox.VirtualMachine, _ cloudinit.Renderer, _ *ignition.Enricher) isoInjector { return FakeIgnitionISOInjector{} @@ -575,36 +725,48 @@ func TestIgnitionISOInjector(t *testing.T) { } func TestReconcileBootstrapData_DefaultDeviceIPPoolRef(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = 
&infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{ - Bridge: "vmbr0", + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapDataReconcilationReason) + setupVMWithMetadata(t, machineScope, "virtio=A6:23:64:4D:84:CB,bridge=vmbr0") + networkDataPtr := setupFakeIsoInjector(t) + createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) + + defaultPool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool0", + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{{ + Bridge: ptr.To("vmbr0"), Model: ptr.To("virtio"), - IPPoolConfig: infrav1alpha1.IPPoolConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool", - Name: "sample-shared-pool", - }, + Name: ptr.To(infrav1.DefaultNetworkDevice), + InterfaceConfig: infrav1.InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}, }, - }, + }}, } - vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0") - vm.VirtualMachineConfig.SMBios1 = biosUUID - machineScope.SetVirtualMachine(vm) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.5.10.10"}} - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.5.10.10") - - createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig) - getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector { - return FakeISOInjector{} - } - t.Cleanup(func() { getISOInjector = defaultISOInjector }) + createIPPools(t, kubeClient, machineScope) + 
createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.5.10.10", &extraPool0) + // Perform the test requeue, err := reconcileBootstrapData(context.Background(), machineScope) require.NoError(t, err) require.False(t, requeue) - require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)) + require.True(t, conditions.Has(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition)) require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided) + + // Test if generated data is equal + networkConfigData := getNetworkConfigDataFromVM(t, *networkDataPtr) + require.True(t, len(networkConfigData) == 1) + require.True(t, len(networkConfigData[0].IPConfigs) == 2) + ipConfigs := networkConfigData[0].IPConfigs + require.Equal(t, "10.0.0.1", ipConfigs[0].Gateway) + require.Equal(t, "10.10.10.10/24", ipConfigs[0].IPAddress) + require.Equal(t, "", ipConfigs[1].Gateway) + require.Equal(t, "10.5.10.10/0", ipConfigs[1].IPAddress) } diff --git a/internal/service/vmservice/delete.go b/internal/service/vmservice/delete.go index c6ec64bb..fd3fde65 100644 --- a/internal/service/vmservice/delete.go +++ b/internal/service/vmservice/delete.go @@ -27,7 +27,7 @@ import ( "github.com/pkg/errors" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) @@ -42,10 +42,10 @@ func DeleteVM(ctx context.Context, machineScope *scope.MachineScope) error { // remove machine from cluster status machineScope.InfraCluster.ProxmoxCluster.RemoveNodeLocation(machineScope.Name(), util.IsControlPlaneMachine(machineScope.Machine)) // The VM is deleted so remove the 
finalizer. - ctrlutil.RemoveFinalizer(machineScope.ProxmoxMachine, infrav1alpha1.MachineFinalizer) + ctrlutil.RemoveFinalizer(machineScope.ProxmoxMachine, infrav1.MachineFinalizer) return machineScope.InfraCluster.PatchObject() } - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") return err } diff --git a/internal/service/vmservice/delete_test.go b/internal/service/vmservice/delete_test.go index 53a04103..68de8653 100644 --- a/internal/service/vmservice/delete_test.go +++ b/internal/service/vmservice/delete_test.go @@ -25,14 +25,14 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) func TestDeleteVM_SuccessNotFound(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) - machineScope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1alpha1.NodeLocation{ + machineScope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1.NodeLocation{ Machine: corev1.LocalObjectReference{Name: machineScope.Name()}, Node: "node1", }, false) diff --git a/internal/service/vmservice/find_test.go b/internal/service/vmservice/find_test.go index 503909ff..3abb620b 100644 --- a/internal/service/vmservice/find_test.go +++ b/internal/service/vmservice/find_test.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) func TestFindVM_FindByNodeAndID(t *testing.T) { @@ -45,7 
+45,7 @@ func TestFindVM_FindByNodeLocationsAndID(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) vm := newRunningVM() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) - machineScope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1alpha1.NodeLocation{ + machineScope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1.NodeLocation{ Machine: corev1.LocalObjectReference{Name: machineScope.ProxmoxMachine.GetName()}, Node: "node3", }, false) @@ -129,7 +129,7 @@ func TestUpdateVMLocation_UpdateNode(t *testing.T) { vmr := newVMResource() machineScope.ProxmoxMachine.Spec.VirtualMachineID = ptr.To(int64(vm.VMID)) machineScope.ProxmoxMachine.Status.ProxmoxNode = ptr.To("node3") - machineScope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1alpha1.NodeLocation{ + machineScope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1.NodeLocation{ Machine: corev1.LocalObjectReference{Name: machineScope.Name()}, Node: "node3", }, false) diff --git a/internal/service/vmservice/helpers_test.go b/internal/service/vmservice/helpers_test.go index 034d596f..288c8b4e 100644 --- a/internal/service/vmservice/helpers_test.go +++ b/internal/service/vmservice/helpers_test.go @@ -18,6 +18,7 @@ package vmservice import ( "context" + "encoding/json" "fmt" "net/netip" "testing" @@ -27,6 +28,7 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fields "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" @@ -35,18 +37,25 @@ import ( "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 
"github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/inject" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/cloudinit" + . "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/ignition" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/types" ) type FakeISOInjector struct { - Error error + Error error + VirtualMachine *proxmox.VirtualMachine + BootstrapData []byte + MetaData cloudinit.Renderer + Network cloudinit.Renderer } func (f FakeISOInjector) Inject(_ context.Context, _ inject.BootstrapDataFormat) error { @@ -61,6 +70,15 @@ func (f FakeIgnitionISOInjector) Inject(_ context.Context, _ inject.BootstrapDat return f.Error } +// setupReconcilerTestWithCondition sets up a reconciler test with a condition for the proxmoxmachiens statemachine. +func setupReconcilerTestWithCondition(t *testing.T, condition string) (*scope.MachineScope, *proxmoxtest.MockClient, client.Client) { + machineScope, mockClient, client := setupReconcilerTest(t) + + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, condition, clusterv1.ConditionSeverityInfo, "") + + return machineScope, mockClient, client +} + // setupReconcilerTest initializes a MachineScope with a mock Proxmox client and a fake controller-runtime client. 
func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockClient, client.Client) { cluster := &clusterv1.Cluster{ @@ -74,47 +92,61 @@ func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockCl ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + "cluster.x-k8s.io/cluster-name": "test", + }, }, } - infraCluster := &infrav1alpha1.ProxmoxCluster{ + infraCluster := &infrav1.ProxmoxCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha1", + Kind: "ProxmoxCluster", + }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: metav1.NamespaceDefault, Finalizers: []string{ - infrav1alpha1.ClusterFinalizer, + infrav1.ClusterFinalizer, }, }, - Spec: infrav1alpha1.ProxmoxClusterSpec{ - IPv4Config: &infrav1alpha1.IPConfigSpec{ + Spec: infrav1.ProxmoxClusterSpec{ + IPv4Config: &infrav1.IPConfigSpec{ Addresses: []string{"10.0.0.10-10.0.0.20"}, Prefix: 24, Gateway: "10.0.0.1", }, DNSServers: []string{"1.2.3.4"}, }, - Status: infrav1alpha1.ProxmoxClusterStatus{ - NodeLocations: &infrav1alpha1.NodeLocations{}, + Status: infrav1.ProxmoxClusterStatus{ + NodeLocations: &infrav1.NodeLocations{}, }, } - infraCluster.Status.InClusterIPPoolRef = []corev1.LocalObjectReference{{Name: ipam.InClusterPoolFormat(infraCluster, infrav1alpha1.IPV4Format)}} + infraCluster.Status.InClusterIPPoolRef = []corev1.LocalObjectReference{{Name: ipam.InClusterPoolFormat(infraCluster, infrav1.IPv4Format)}} - infraMachine := &infrav1alpha1.ProxmoxMachine{ + infraMachine := &infrav1.ProxmoxMachine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha1", + Kind: "ProxmoxMachine", + }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: metav1.NamespaceDefault, Finalizers: []string{ - infrav1alpha1.MachineFinalizer, + infrav1.MachineFinalizer, + }, + Labels: map[string]string{ + "cluster.x-k8s.io/cluster-name": "test", }, }, - Spec: 
infrav1alpha1.ProxmoxMachineSpec{ - VirtualMachineCloneSpec: infrav1alpha1.VirtualMachineCloneSpec{ - TemplateSource: infrav1alpha1.TemplateSource{ - SourceNode: "node1", + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ + TemplateSource: infrav1.TemplateSource{ + SourceNode: ptr.To("node1"), TemplateID: ptr.To[int32](123), }, }, - }, + }), } scheme := runtime.NewScheme() @@ -122,20 +154,47 @@ func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockCl require.NoError(t, clusterv1.AddToScheme(scheme)) require.NoError(t, ipamv1.AddToScheme(scheme)) require.NoError(t, ipamicv1.AddToScheme(scheme)) - require.NoError(t, infrav1alpha1.AddToScheme(scheme)) + require.NoError(t, infrav1.AddToScheme(scheme)) kubeClient := fake.NewClientBuilder(). WithScheme(scheme). WithObjects(cluster, machine, infraCluster, infraMachine). - WithStatusSubresource(&infrav1alpha1.ProxmoxCluster{}, &infrav1alpha1.ProxmoxMachine{}). + WithStatusSubresource(&infrav1.ProxmoxCluster{}, &infrav1.ProxmoxMachine{}). Build() ipamHelper := ipam.NewHelper(kubeClient, infraCluster) logger := logr.Discard() - require.NoError(t, ipamHelper.CreateOrUpdateInClusterIPPool(context.Background())) - mockClient := proxmoxtest.NewMockClient(t) + // fake indexing tests. 
TODO: Unify + + indexFunc := func(obj client.Object) []string { + return []string{obj.(*ipamv1.IPAddress).Spec.PoolRef.Name} + } + + err := fake.AddIndex(kubeClient, &ipamv1.IPAddress{}, "spec.poolRef.name", indexFunc) + require.NoError(t, err) + + // set up index for ipAddressClaims owner ProxmoxMachine (testing of interfaces) + indexFunc = func(obj client.Object) []string { + var ret = []string{} + + owners := obj.(*ipamv1.IPAddressClaim).ObjectMeta.OwnerReferences + + for _, owner := range owners { + if owner.Kind == infrav1.ProxmoxMachineKind { + ret = append(ret, owner.Name) + } + } + return ret + } + + err = fake.AddIndex(kubeClient, &ipamv1.IPAddressClaim{}, "ipaddressclaim.ownerMachine", indexFunc) + require.NoError(t, err) + + // Create InClusterIPPools after the indexes are set up + require.NoError(t, ipamHelper.CreateOrUpdateInClusterIPPool(context.Background())) + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: kubeClient, Logger: &logger, @@ -161,7 +220,7 @@ func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockCl } func getIPSuffix(addr string) string { - suffix := infrav1alpha1.DefaultSuffix + suffix := infrav1.DefaultSuffix ip := netip.MustParseAddr(addr) if ip.Is6() { suffix += "6" @@ -169,56 +228,211 @@ func getIPSuffix(addr string) string { return suffix } +func createIPAddressResource(t *testing.T, c client.Client, name string, machineScope *scope.MachineScope, ip netip.Prefix, pool *corev1.TypedLocalObjectReference) { + // gateway := netip.MustParsePrefix(fmt.Sprintf("%s/%d", ip, prefix)).Addr().Next().String() + + prefix := ip.Bits() + var gateway string + + if pool != nil { + ipAddrClaim := &ipamv1.IPAddressClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ipam.cluster.x-k8s.io/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: machineScope.Namespace(), + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: machineScope.ProxmoxMachine.APIVersion, + Kind: 
"ProxmoxMachine", + Name: machineScope.Name(), + }}, + }, + Spec: ipamv1.IPAddressClaimSpec{ + PoolRef: *pool, + }, + } + require.NoError(t, c.Create(context.Background(), ipAddrClaim)) -func createIPAddressResource(t *testing.T, c client.Client, name, namespace, ip string, prefix int) { - obj := &ipamv1.IPAddress{ + poolSpec := getPoolSpec(getIPAddressPool(t, c, machineScope, pool)) + prefix = poolSpec.prefix + gateway = poolSpec.gateway + } + + ipAddr := &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ipam.cluster.x-k8s.io/v1beta1", + }, ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: namespace, + Namespace: machineScope.Namespace(), }, Spec: ipamv1.IPAddressSpec{ - Address: ip, + Address: ip.Addr().String(), Prefix: prefix, - Gateway: netip.MustParsePrefix(fmt.Sprintf("%s/%d", ip, prefix)).Addr().Next().String(), + Gateway: gateway, + PoolRef: ptr.Deref(pool, corev1.TypedLocalObjectReference{}), }, } - require.NoError(t, c.Create(context.Background(), obj)) + require.NoError(t, c.Create(context.Background(), ipAddr)) } -func createIP4AddressResource(t *testing.T, c client.Client, machineScope *scope.MachineScope, device, ip string) { +func createIPv4AddressResource(t *testing.T, c client.Client, machineScope *scope.MachineScope, device, ip string, pool *corev1.TypedLocalObjectReference) { require.Truef(t, netip.MustParseAddr(ip).Is4(), "%s is not a valid ipv4 address", ip) - name := formatIPAddressName(machineScope.Name(), device) + poolName := ptr.Deref(pool, corev1.TypedLocalObjectReference{Name: "dummy"}).Name + name := formatIPAddressName(machineScope.Name(), poolName, device) name = fmt.Sprintf("%s-%s", name, getIPSuffix(ip)) - createIPAddressResource(t, c, name, machineScope.Namespace(), ip, 24) + createIPAddressResource(t, c, name, machineScope, netip.MustParsePrefix(ip+"/24"), pool) } -func createIP6AddressResource(t *testing.T, c client.Client, machineScope *scope.MachineScope, device, ip string) { +func 
createIPv6AddressResource(t *testing.T, c client.Client, machineScope *scope.MachineScope, device, ip string, pool *corev1.TypedLocalObjectReference) { require.Truef(t, netip.MustParseAddr(ip).Is6(), "%s is not a valid ipv6 address", ip) - name := formatIPAddressName(machineScope.Name(), device) + poolName := ptr.Deref(pool, corev1.TypedLocalObjectReference{Name: "dummyv6"}).Name + name := formatIPAddressName(machineScope.Name(), poolName, device) name = fmt.Sprintf("%s-%s", name, getIPSuffix(ip)) - createIPAddressResource(t, c, name, machineScope.Namespace(), ip, 64) + createIPAddressResource(t, c, name, machineScope, netip.MustParsePrefix(ip+"/64"), pool) +} + +func createIPAddressesForMachine(t *testing.T, c client.Client, machineScope *scope.MachineScope, ipAddresses []netip.Prefix) { + ipCount := 0 + for _, device := range ptr.Deref(machineScope.ProxmoxMachine.Spec.Network, infrav1.NetworkSpec{}).NetworkDevices { + for j, poolRef := range device.IPPoolRef { + // todo: unify with ipam + ipName := fmt.Sprintf("%s-%s-%02d-%s", machineScope.ProxmoxMachine.GetName(), *device.Name, j, infrav1.DefaultSuffix) + createIPAddressResource(t, c, ipName, machineScope, ipAddresses[ipCount], &poolRef) + ipCount++ + } + } } func createIPPools(t *testing.T, c client.Client, machineScope *scope.MachineScope) { - for _, dev := range machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices { - poolRef := dev.IPv4PoolRef - if poolRef == nil { - poolRef = dev.IPv6PoolRef + for _, dev := range machineScope.ProxmoxMachine.Spec.Network.NetworkDevices { + for _, poolRef := range dev.IPPoolRef { + createOrUpdateIPPool(t, c, machineScope, &poolRef, nil) } + } +} - var obj client.Object +func createOrUpdateIPPool(t *testing.T, c client.Client, machineScope *scope.MachineScope, poolRef *corev1.TypedLocalObjectReference, pool client.Object) *corev1.TypedLocalObjectReference { + // literally nothing to do + if pool == nil && poolRef == nil { + return nil + } + + if pool == nil { switch 
poolRef.Kind { - case "InClusterIPPool": - obj = &ipamicv1.InClusterIPPool{} - obj.SetNamespace(machineScope.Namespace()) - case "GlobalInClusterIPPool": - obj = &ipamicv1.GlobalInClusterIPPool{} + case InClusterIPPool: + pool = &ipamicv1.InClusterIPPool{TypeMeta: metav1.TypeMeta{Kind: InClusterIPPool, APIVersion: ipamicv1.GroupVersion.String()}} + pool.SetNamespace(machineScope.Namespace()) + case GlobalInClusterIPPool: + pool = &ipamicv1.GlobalInClusterIPPool{TypeMeta: metav1.TypeMeta{Kind: GlobalInClusterIPPool, APIVersion: ipamicv1.GroupVersion.String()}} + } + pool.SetName(poolRef.Name) + } + + if poolRef == nil { + poolRef = &corev1.TypedLocalObjectReference{ + Name: pool.GetName(), + Kind: pool.GetObjectKind().GroupVersionKind().Kind, + APIGroup: ptr.To(pool.GetObjectKind().GroupVersionKind().Group), } - obj.SetName(poolRef.Name) - require.NoError(t, c.Create(context.Background(), obj)) } + + desired := pool.DeepCopyObject() + + _, err := controllerutil.CreateOrUpdate(context.Background(), c, pool, func() error { + // TODO: Metric change in annotations + if pool.GetObjectKind().GroupVersionKind().Kind == InClusterIPPool { + pool.(*ipamicv1.InClusterIPPool).Spec = desired.(*ipamicv1.InClusterIPPool).Spec + } else if pool.GetObjectKind().GroupVersionKind().Kind == GlobalInClusterIPPool { + pool.(*ipamicv1.GlobalInClusterIPPool).Spec = desired.(*ipamicv1.GlobalInClusterIPPool).Spec + } + return nil + }, + ) + + require.NoError(t, err) + + return poolRef +} + +// todo: ZONES? 
+func getDefaultPoolRefs(machineScope *scope.MachineScope) []corev1.LocalObjectReference { + cluster := machineScope.InfraCluster.ProxmoxCluster + + return cluster.Status.InClusterIPPoolRef +} + +func getPoolSpec(pool client.Object) struct { + gateway string + prefix int +} { + var gateway string + var prefix int + if pool.GetObjectKind().GroupVersionKind().Kind == InClusterIPPool { + prefix = pool.(*ipamicv1.InClusterIPPool).Spec.Prefix + gateway = pool.(*ipamicv1.InClusterIPPool).Spec.Gateway + } else if pool.GetObjectKind().GroupVersionKind().Kind == GlobalInClusterIPPool { + prefix = pool.(*ipamicv1.GlobalInClusterIPPool).Spec.Prefix + gateway = pool.(*ipamicv1.GlobalInClusterIPPool).Spec.Gateway + } + + return struct { + gateway string + prefix int + }{gateway: gateway, prefix: prefix} +} + +func getIPAddressPool(t *testing.T, c client.Client, machineScope *scope.MachineScope, poolRef *corev1.TypedLocalObjectReference) client.Object { + var obj client.Object + var err error + + if poolRef.Kind == InClusterIPPool { + obj, err = machineScope.IPAMHelper.GetInClusterIPPool(context.Background(), poolRef) + } else if poolRef.Kind == GlobalInClusterIPPool { + obj, err = machineScope.IPAMHelper.GetGlobalInClusterIPPool(context.Background(), poolRef) + } + + require.NoError(t, err) + return obj +} + +func getIPAddressClaims(t *testing.T, c client.Client, machineScope *scope.MachineScope) map[string]*[]ipamv1.IPAddressClaim { + ipAddressClaims := &ipamv1.IPAddressClaimList{} + + fieldSelector, _ := fields.ParseSelector("ipaddressclaim.ownerMachine=" + machineScope.Name()) + + listOptions := client.ListOptions{FieldSelector: fieldSelector} + c.List(context.Background(), ipAddressClaims, &listOptions) + + claimMap := make(map[string]*[]ipamv1.IPAddressClaim) + + for _, claim := range ipAddressClaims.Items { + pool := claim.Spec.PoolRef.Name + + perPoolClaims := ptr.Deref(claimMap[pool], []ipamv1.IPAddressClaim{}) + perPoolClaims = append(perPoolClaims, claim) + 
claimMap[pool] = &perPoolClaims + } + + return claimMap +} + +func getIPAddressClaimsPerPool(t *testing.T, c client.Client, machineScope *scope.MachineScope, pool string) *[]ipamv1.IPAddressClaim { + ipAddressClaims := getIPAddressClaims(t, c, machineScope) + return ipAddressClaims[pool] +} + +func getNetworkConfigDataFromVM(t *testing.T, jsonData []byte) []types.NetworkConfigData { + var networkConfigData []types.NetworkConfigData + + err := json.Unmarshal(jsonData, &networkConfigData) + + require.NoError(t, err) + + return networkConfigData } func createBootstrapSecret(t *testing.T, c client.Client, machineScope *scope.MachineScope, format string) { diff --git a/internal/service/vmservice/ip.go b/internal/service/vmservice/ip.go index 78dee80c..3d5f7879 100644 --- a/internal/service/vmservice/ip.go +++ b/internal/service/vmservice/ip.go @@ -20,191 +20,215 @@ import ( "context" "fmt" "net/netip" + "reflect" + "slices" "strconv" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/utils/ptr" + ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/controller-runtime/pkg/client" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) func reconcileIPAddresses(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { - if machineScope.ProxmoxMachine.Status.IPAddresses != nil { - // skip machine has IpAddress already. 
+ if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.WaitingForStaticIPAllocationReason { + // Machine is in the wrong state to reconcile, we only reconcile VMs Waiting for IP Address assignment return false, nil } + machineScope.Logger.V(4).Info("reconciling IPAddresses.") - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityInfo, "") - addresses := make(map[string]infrav1alpha1.IPAddress) + // TODO: This datastructure is less bad, but still bad + netPoolAddresses := make(map[string]map[corev1.TypedLocalObjectReference][]ipamv1.IPAddress) - // default device. - if requeue, err = handleDefaultDevice(ctx, machineScope, addresses); err != nil || requeue { - return true, errors.Wrap(err, "unable to handle default device") + if machineScope.ProxmoxMachine.Spec.Network != nil { + if requeue, err = handleDevices(ctx, machineScope, netPoolAddresses); err != nil || requeue { + if err == nil { + return true, errors.Wrap(err, "requeuing network reconcillation") + } else { + return true, errors.Wrap(err, "unable to handle network devices") + } + } } - if machineScope.ProxmoxMachine.Spec.Network != nil { - if requeue, err = handleAdditionalDevices(ctx, machineScope, addresses); err != nil || requeue { - return true, errors.Wrap(err, "unable to handle additional devices") + // TODO: move to own state machine stage. 
Doesn't belong here, really + defaultDevicePools := netPoolAddresses[infrav1.DefaultNetworkDevice] + defaultPools := machineScope.InfraCluster.ProxmoxCluster.Status.InClusterIPPoolRef + for _, pool := range defaultPools { + poolRef := corev1.TypedLocalObjectReference{ + Name: pool.Name, + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + } + for defaultPool, ipAddresses := range defaultDevicePools { + if reflect.DeepEqual(defaultPool, poolRef) { + // Todo: This is not necessarily the default IP + setVMIPAddressTag(ctx, machineScope, ipAddresses[0]) + } } } - // update the status.IpAddr. + // update status.IpAddr. + // TODO: This datastructure should be redundant. Too many loops too + statusAddresses := make(map[string]*infrav1.IPAddresses, len(netPoolAddresses)) + for net, pools := range netPoolAddresses { + for _, ips := range pools { + for _, ip := range ips { + if _, e := statusAddresses[net]; !e { + statusAddresses[net] = new(infrav1.IPAddresses) + } + if isIPv4(ip.Spec.Address) { + statusAddresses[net].IPv4 = append(statusAddresses[net].IPv4, ip.Spec.Address) + } else { + statusAddresses[net].IPv6 = append(statusAddresses[net].IPv6, ip.Spec.Address) + } + } + } + } machineScope.Logger.V(4).Info("updating ProxmoxMachine.status.ipAddresses.") - machineScope.ProxmoxMachine.Status.IPAddresses = addresses + machineScope.ProxmoxMachine.Status.IPAddresses = statusAddresses + + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReconcilationReason, clusterv1.ConditionSeverityInfo, "") return true, nil } -func findIPAddress(ctx context.Context, machineScope *scope.MachineScope, device string) (*ipamv1.IPAddress, error) { - key := client.ObjectKey{ - Namespace: machineScope.Namespace(), - Name: formatIPAddressName(machineScope.Name(), device), +// Todo: add tagging to its own stage. 
+func setVMIPAddressTag(ctx context.Context, machineScope *scope.MachineScope, ipAddress ipamv1.IPAddress) (bool, error) { + // format ipTag as `ip_net0_` + // to add it to the VM. + ipTag := fmt.Sprintf("ip_%s_%s", infrav1.DefaultNetworkDevice, ipAddress.Spec.Address) + + requeue := false + // TODO: IPv6 tag? + // Add ipv4 tag if the Virtual Machine doesn't have it. + if vm := machineScope.VirtualMachine; !vm.HasTag(ipTag) && isIPv4(ipAddress.Spec.Address) { + machineScope.Logger.V(4).Info("adding virtual machine ip tag.", "ip", ipAddress.Spec.Address) + t, err := machineScope.InfraCluster.ProxmoxClient.TagVM(ctx, vm, ipTag) + if err != nil { + return false, errors.Wrapf(err, "unable to add Ip tag to VirtualMachine %s", machineScope.Name()) + } + machineScope.ProxmoxMachine.Status.TaskRef = ptr.To(string(t.UPID)) + requeue = true } - return machineScope.IPAMHelper.GetIPAddress(ctx, key) + + return requeue, nil } -func findIPAddressGatewayMetric(ctx context.Context, machineScope *scope.MachineScope, ipAddress *ipamv1.IPAddress) (*uint32, error) { +// Todo: This function is only called in a helper. +func formatIPAddressName(name, pool, device string) string { + return fmt.Sprintf("%s-%s-%s", name, pool, device) +} + +// findIPAddress returns all IPAddresses owned by a pool and a machine. 
+func findIPAddress(ctx context.Context, poolRef *corev1.TypedLocalObjectReference, machineScope *scope.MachineScope) ([]ipamv1.IPAddress, error) { + return machineScope.IPAMHelper.GetIPAddressV2(ctx, *poolRef, machineScope.ProxmoxMachine) +} + +func findIPAddressGatewayMetric(ctx context.Context, machineScope *scope.MachineScope, ipAddress *ipamv1.IPAddress) (*int32, error) { annotations, err := machineScope.IPAMHelper.GetIPPoolAnnotations(ctx, ipAddress) if err != nil { return nil, err } - var rv *uint32 + var rv *int32 if s, exists := annotations["metric"]; exists { - metric, err := strconv.ParseUint(s, 0, 32) + metric, err := strconv.ParseInt(s, 0, 32) if err != nil { return nil, err } - rv = ptr.To(uint32(metric)) + rv = ptr.To(int32(metric)) } return rv, nil } -func formatIPAddressName(name, device string) string { - return fmt.Sprintf("%s-%s", name, device) +// Todo: Useless, remove? +func machineHasIPAddress(machine *infrav1.ProxmoxMachine) bool { + // Every machine needs to have at least one IPv4 or IPv6 host network address + if machine.Status.IPAddresses[infrav1.DefaultNetworkDevice] == nil { + return false + } else { + return len(machine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv4) > 0 || + len(machine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv6) > 0 + } } -func machineHasIPAddress(machine *infrav1alpha1.ProxmoxMachine) bool { - return machine.Status.IPAddresses[infrav1alpha1.DefaultNetworkDevice] != (infrav1alpha1.IPAddress{}) -} +func handleIPAddresses(ctx context.Context, machineScope *scope.MachineScope, dev *string, poolNum int, poolRef *corev1.TypedLocalObjectReference) ([]ipamv1.IPAddress, error) { + device := ptr.Deref(dev, infrav1.DefaultNetworkDevice) -func handleIPAddressForDevice(ctx context.Context, machineScope *scope.MachineScope, device, format string, ipamRef *corev1.TypedLocalObjectReference) (string, error) { - suffix := infrav1alpha1.DefaultSuffix - if format == infrav1alpha1.IPV6Format { - suffix += "6" - } - 
formattedDevice := fmt.Sprintf("%s-%s", device, suffix) - ipAddr, err := findIPAddress(ctx, machineScope, formattedDevice) + ipAddresses, err := findIPAddress(ctx, poolRef, machineScope) if err != nil { + // Technically this error can not occure, as fieldselectors just return empty lists if !apierrors.IsNotFound(err) { - return "", err + return []ipamv1.IPAddress{}, err } + } + + if len(ipAddresses) == 0 { machineScope.Logger.V(4).Info("IPAddress not found, creating it.", "device", device) // IpAddress not yet created. - err = machineScope.IPAMHelper.CreateIPAddressClaim(ctx, machineScope.ProxmoxMachine, device, format, machineScope.InfraCluster.Cluster.GetName(), ipamRef) + err = machineScope.IPAMHelper.CreateIPAddressClaimV2(ctx, machineScope.ProxmoxMachine, device, poolNum, machineScope.InfraCluster.Cluster.GetName(), poolRef) if err != nil { - return "", errors.Wrapf(err, "unable to create Ip address claim for machine %s", machineScope.Name()) + return []ipamv1.IPAddress{}, errors.Wrapf(err, "unable to create Ip address claim for machine %s", machineScope.Name()) } - return "", nil - } - - ip := ipAddr.Spec.Address - machineScope.Logger.V(4).Info("IPAddress found, ", "ip", ip, "device", device) - - // format ipTag as `ip_net0_` - // to add it to the VM. - ipTag := fmt.Sprintf("ip_%s_%s", device, ip) - - // Add ip tag if the Virtual Machine doesn't have it. 
- if vm := machineScope.VirtualMachine; device == infrav1alpha1.DefaultNetworkDevice && !vm.HasTag(ipTag) && isIPV4(ip) { - machineScope.Logger.V(4).Info("adding virtual machine ip tag.") - t, err := machineScope.InfraCluster.ProxmoxClient.TagVM(ctx, vm, ipTag) - if err != nil { - return "", errors.Wrapf(err, "unable to add Ip tag to VirtualMachine %s", machineScope.Name()) - } - machineScope.ProxmoxMachine.Status.TaskRef = ptr.To(string(t.UPID)) - return "", nil + // send the machine to requeue so ipaddresses can be created + return []ipamv1.IPAddress{}, nil } - return ip, nil + machineScope.Logger.V(4).Info("IPAddresses found, ", "ip", ipAddresses, "device", device) + return ipAddresses, nil } -func handleDefaultDevice(ctx context.Context, machineScope *scope.MachineScope, addresses map[string]infrav1alpha1.IPAddress) (bool, error) { - // default network device ipv4. - if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil || - (machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef != nil) { - var ipamRef *corev1.TypedLocalObjectReference - if machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef != nil { - ipamRef = machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef - } - - ip, err := handleIPAddressForDevice(ctx, machineScope, infrav1alpha1.DefaultNetworkDevice, infrav1alpha1.IPV4Format, ipamRef) - if err != nil || ip == "" { - return true, err - } - addresses[infrav1alpha1.DefaultNetworkDevice] = infrav1alpha1.IPAddress{ - IPV4: ip, - } +func handleDevices(ctx context.Context, machineScope *scope.MachineScope, addresses map[string]map[corev1.TypedLocalObjectReference][]ipamv1.IPAddress) (bool, error) { + // paranoidly handle callers handing us an empty map + if addresses == nil { + return false, errors.New("handleDevices called without a map") } - // default network device ipv6. 
- if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil || - (machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef != nil) { - var ipamRef *corev1.TypedLocalObjectReference - if machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef != nil { - ipamRef = machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef - } - - ip, err := handleIPAddressForDevice(ctx, machineScope, infrav1alpha1.DefaultNetworkDevice, infrav1alpha1.IPV6Format, ipamRef) - if err != nil || ip == "" { - return true, err + for _, net := range ptr.Deref(machineScope.ProxmoxMachine.Spec.Network, infrav1.NetworkSpec{}).NetworkDevices { + // TODO: Where should prepending default clusterpools belong + // TODO: Network Zones + pools := []corev1.TypedLocalObjectReference{} + if *net.Name == infrav1.DefaultNetworkDevice { + poolsRef, err := GetInClusterIPPoolsFromMachine(ctx, machineScope) + if err != nil { + return false, err + } + pools = *poolsRef } - - addr := addresses[infrav1alpha1.DefaultNetworkDevice] - addr.IPV6 = ip - addresses[infrav1alpha1.DefaultNetworkDevice] = addr - } - return false, nil -} - -func handleAdditionalDevices(ctx context.Context, machineScope *scope.MachineScope, addresses map[string]infrav1alpha1.IPAddress) (bool, error) { - // additional network devices. 
- for _, net := range machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices { - if net.IPv4PoolRef != nil { - ip, err := handleIPAddressForDevice(ctx, machineScope, net.Name, infrav1alpha1.IPV4Format, net.IPv4PoolRef) - if err != nil || ip == "" { - return true, errors.Wrapf(err, "unable to handle IPAddress for device %s", net.Name) + for i, ipPool := range slices.Concat(pools, net.InterfaceConfig.IPPoolRef) { + ipAddresses, err := handleIPAddresses(ctx, machineScope, net.Name, i, &ipPool) + if err != nil { + fmt.Println("handleDevices", "err", err, "ip", ipAddresses) + return true, errors.Wrapf(err, "unable to handle IPAddress for device %+v, pool %s", net.Name, ipPool.Name) } - addresses[net.Name] = infrav1alpha1.IPAddress{ - IPV4: ip, + // requeue machine if ipaddress need creation + if len(ipAddresses) == 0 { + return true, nil } - } - if net.IPv6PoolRef != nil { - ip, err := handleIPAddressForDevice(ctx, machineScope, net.Name, infrav1alpha1.IPV6Format, net.IPv6PoolRef) - if err != nil || ip == "" { - return true, errors.Wrapf(err, "unable to handle IPAddress for device %s", net.Name) + poolMap := addresses[*net.Name] + if poolMap == nil { + poolMap = make(map[corev1.TypedLocalObjectReference][]ipamv1.IPAddress) } - addr := addresses[net.Name] - addr.IPV6 = ip - addresses[net.Name] = addr + poolMap[ipPool] = ipAddresses + addresses[*net.Name] = poolMap } } return false, nil } -func isIPV4(ip string) bool { +func isIPv4(ip string) bool { return netip.MustParseAddr(ip).Is4() } diff --git a/internal/service/vmservice/ip_test.go b/internal/service/vmservice/ip_test.go index 0779204d..0aeb1a1c 100644 --- a/internal/service/vmservice/ip_test.go +++ b/internal/service/vmservice/ip_test.go @@ -18,91 +18,215 @@ package vmservice import ( "context" + "reflect" "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" + 
"sigs.k8s.io/controller-runtime/pkg/client" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" + . "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" ) const ipTag = "ip_net0_10.10.10.10" +// TODO: actually prepend net0 ipaddress claim +// TestReconcileIPAddresses_CreateDefaultClaim tests if the cluster provided InclusterIPPool IPAddressClaim gets created. func TestReconcileIPAddresses_CreateDefaultClaim(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0")}, + }, + } requeue, err := reconcileIPAddresses(context.Background(), machineScope) require.NoError(t, err) require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + + defaultPoolRefs := getDefaultPoolRefs(machineScope) + // test if IPAddressClaim was created + claimsDefaultPool := getIPAddressClaimsPerPool(t, kubeClient, machineScope, defaultPoolRefs[0].Name) + require.NotNil(t, claimsDefaultPool) + require.Equal(t, 1, len(*claimsDefaultPool)) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } +// TestReconcileIPAddresses_CreateAdditionalClaim tests if an IPAddressClaim is created for the missing IPAddress on net1. 
func TestReconcileIPAddresses_CreateAdditionalClaim(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "InClusterIPPool", Name: "custom"}}}}, + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + defaultPool := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extraPool0", + } + + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0")}, + { + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}, + }, + }, }, } + vm := newStoppedVM() vm.VirtualMachineConfig.Tags = ipTag machineScope.SetVirtualMachine(vm) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") + createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) requeue, err := reconcileIPAddresses(context.Background(), machineScope) + + // Since an IPAddress for extraPool0 still needs to be created, the machine should + // requeue without error. 
require.NoError(t, err) require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + + // net1 should not exist yet, because IPAddress reconciliation should be unfinished + require.Nil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net1"]) + + // test if IPAddressClaim was created + claimsExtraPool0 := getIPAddressClaimsPerPool(t, kubeClient, machineScope, extraPool0.Name) + require.NotNil(t, claimsExtraPool0) + require.Equal(t, 1, len(*claimsExtraPool0)) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } +// TestReconcileIPAddresses_AddIPTag tests if a machine with all resources created will add a task to add tags to proxmox VMs. func TestReconcileIPAddresses_AddIPTag(t *testing.T) { - machineScope, proxmoxClient, kubeClient := setupReconcilerTest(t) + machineScope, proxmoxClient, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + defaultPool := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0")}, + }, + } + vm := newStoppedVM() task := newTask() machineScope.SetVirtualMachine(vm) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") + + createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) proxmoxClient.EXPECT().TagVM(context.Background(), vm, ipTag).Return(task, nil).Once() requeue, err := reconcileIPAddresses(context.Background(), machineScope) require.NoError(t, err) require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, 
infrav1alpha1.VMProvisionedCondition) + + // Machine should have one Task Pending + require.NotNil(t, machineScope.ProxmoxMachine.Status.TaskRef) + + // Task should be equal to fake result from TagVM + require.Equal(t, "result", *machineScope.ProxmoxMachine.Status.TaskRef) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } +// TestReconcileIPAddresses_SetIPAddresses tests if proxmoxMachine.Status.IPAddresses gets reconciled. func TestReconcileIPAddresses_SetIPAddresses(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", - NetworkDevice: infrav1alpha1.NetworkDevice{ - IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom"}}, - }}, + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + defaultPool := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extraPool0", + } + + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0")}, + { + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}, + }, + }, }, } + createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", 
"10.100.10.10", &extraPool0) + vm := newStoppedVM() vm.VirtualMachineConfig.Tags = ipTag machineScope.SetVirtualMachine(vm) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10") - createIPPools(t, kubeClient, machineScope) requeue, err := reconcileIPAddresses(context.Background(), machineScope) + require.NoError(t, err) require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + + require.NotNil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net0"]) + require.Equal(t, *(machineScope.ProxmoxMachine.Status.IPAddresses["net0"]), infrav1.IPAddresses{IPv4: []string{"10.10.10.10"}, IPv6: nil}) + require.NotNil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net1"]) + require.Equal(t, *(machineScope.ProxmoxMachine.Status.IPAddresses["net1"]), infrav1.IPAddresses{IPv4: []string{"10.100.10.10"}, IPv6: nil}) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } +// TestReconcileIPAddresses_MultipleDevices tests if proxmoxMachine.Status.IPAddresses gets reconciled with IPv4 and IPv6 on multiple devices. 
func TestReconcileIPAddresses_MultipleDevices(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "ipv4pool"}}}}, - {Name: "net2", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv6PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "ipv6pool"}}}}, + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + defaultPool := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + ipv4pool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "ipv4pool0", + } + ipv4pool1 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "ipv4pool1", + } + ipv6pool := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "ipv6pool", + } + + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + { + Name: ptr.To(infrav1.DefaultNetworkDevice), + InterfaceConfig: infrav1.InterfaceConfig{IPPoolRef: []corev1.TypedLocalObjectReference{ipv4pool0}}, + }, + { + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{IPPoolRef: []corev1.TypedLocalObjectReference{ipv4pool1}}, + }, + { + Name: ptr.To("net2"), + InterfaceConfig: infrav1.InterfaceConfig{IPPoolRef: 
[]corev1.TypedLocalObjectReference{ipv6pool}}, + }, }, } @@ -110,75 +234,156 @@ func TestReconcileIPAddresses_MultipleDevices(t *testing.T) { vm.VirtualMachineConfig.Tags = ipTag machineScope.SetVirtualMachine(vm) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10") - createIP6AddressResource(t, kubeClient, machineScope, "net2", "fe80::ffee") createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.11.10.10", &ipv4pool0) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10", &ipv4pool1) + createIPv6AddressResource(t, kubeClient, machineScope, "net2", "fe80::ffee", &ipv6pool) requeue, err := reconcileIPAddresses(context.Background(), machineScope) require.NoError(t, err) require.True(t, requeue) + require.Len(t, machineScope.ProxmoxMachine.Status.IPAddresses, 3) - expected := map[string]infrav1alpha1.IPAddress{ - "net0": {IPV4: "10.10.10.10"}, - "net1": {IPV4: "10.100.10.10"}, - "net2": {IPV6: "fe80::ffee"}, - } + // TODO when we can ensure default ip comes first: require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv4[0]) + require.ElementsMatch(t, []string{"10.10.10.10", "10.11.10.10"}, machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv4) + require.Nil(t, machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv6) + + require.ElementsMatch(t, []string{"10.100.10.10"}, machineScope.ProxmoxMachine.Status.IPAddresses["net1"].IPv4) + require.Nil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net1"].IPv6) - require.Equal(t, expected, machineScope.ProxmoxMachine.Status.IPAddresses) - requireConditionIsFalse(t, 
machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + require.Nil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net2"].IPv4) + require.ElementsMatch(t, []string{"fe80::ffee"}, machineScope.ProxmoxMachine.Status.IPAddresses["net2"].IPv6) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } -func TestReconcileIPAddresses_IPV6(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1alpha1.IPConfigSpec{ - Addresses: []string{"fe80::/64"}, +// TestReconcileIPAddresses_IPv6 tests if proxmoxMachine.Status.IPAddresses gets reconciled when the cluster additionally defines an IPv6 InClusterIPPool. +func TestReconcileIPAddresses_IPv6(t *testing.T) { + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + // add test-v6-icip InClusterIPPool + proxmoxCluster := machineScope.InfraCluster.ProxmoxCluster + proxmoxCluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ + Addresses: []string{"fe80::"}, Prefix: 64, - Gateway: "fe80::1", + Gateway: "fe80::", + Metric: nil, + } + require.NoError(t, kubeClient.Update(context.Background(), proxmoxCluster)) + + // Status can't be updated and needs to be patched + patch := client.MergeFrom(proxmoxCluster.DeepCopy()) + proxmoxCluster.Status.InClusterIPPoolRef = []corev1.LocalObjectReference{ + {Name: "test-v4-icip"}, + {Name: "test-v6-icip"}, + } + require.NoError(t, kubeClient.Status().Patch(context.Background(), proxmoxCluster, patch)) + + // create the extra ipv6 pool + require.NoError(t, machineScope.IPAMHelper.CreateOrUpdateInClusterIPPool(context.Background())) + + defaultPool := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + defaultPoolV6 := corev1.TypedLocalObjectReference{APIGroup: 
ptr.To("ipam.cluster.x-k8s.io"), + Kind: InClusterIPPool, + Name: getDefaultPoolRefs(machineScope)[1].Name, } - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom"}}}}, + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool0", + } + + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0")}, + { + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}}, + }, }, } + vm := newStoppedVM() vm.VirtualMachineConfig.Tags = ipTag machineScope.SetVirtualMachine(vm) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP6AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "fe80::1") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10") createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.10.10.10", &defaultPool) + createIPv6AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "fe80::1", &defaultPoolV6) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10", &extraPool0) requeue, err := reconcileIPAddresses(context.Background(), machineScope) require.NoError(t, err) require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + + // need to reconcile twice for the ipv6 default pool to be added + requeue, err = 
reconcileIPAddresses(context.Background(), machineScope) + require.NoError(t, err) + require.False(t, requeue) + + require.NotNil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net0"]) + require.Equal(t, infrav1.IPAddresses{IPv4: []string{"10.10.10.10"}, IPv6: []string{"fe80::1"}}, *(machineScope.ProxmoxMachine.Status.IPAddresses["net0"])) + require.NotNil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net1"]) + require.Equal(t, infrav1.IPAddresses{IPv4: []string{"10.100.10.10"}, IPv6: nil}, *(machineScope.ProxmoxMachine.Status.IPAddresses["net1"])) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } +// TestReconcileIPAddresses_MachineIPPoolRef tests reconciliation of per-device IPPoolRefs. TODO: test multiple claims from the same pool. func TestReconcileIPAddresses_MachineIPPoolRef(t *testing.T) { - machineScope, _, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{ - IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom-ips"}}, - }, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", - NetworkDevice: infrav1alpha1.NetworkDevice{ - IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom-additional-ips"}}, - }}, + machineScope, _, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForStaticIPAllocationReason) + + defaultPool := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + Name: getDefaultPoolRefs(machineScope)[0].Name, + } + extraPool0 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: GlobalInClusterIPPool, + Name: "extrapool0", + } + extraPool1 := corev1.TypedLocalObjectReference{APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + 
Kind: GlobalInClusterIPPool, + Name: "extrapool1", + } + + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + { + Name: ptr.To("net0"), + InterfaceConfig: infrav1.InterfaceConfig{IPPoolRef: []corev1.TypedLocalObjectReference{extraPool0}}, + }, + { + Name: ptr.To("net1"), + InterfaceConfig: infrav1.InterfaceConfig{IPPoolRef: []corev1.TypedLocalObjectReference{extraPool1}}, + }, }, } + vm := newStoppedVM() vm.VirtualMachineConfig.Tags = ipTag machineScope.SetVirtualMachine(vm) - createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10") - createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10") + + defaultIP := "10.10.10.10" createIPPools(t, kubeClient, machineScope) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, defaultIP, &defaultPool) + createIPv4AddressResource(t, kubeClient, machineScope, infrav1.DefaultNetworkDevice, "10.50.10.10", &extraPool0) + createIPv4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10", &extraPool1) requeue, err := reconcileIPAddresses(context.Background(), machineScope) require.NoError(t, err) require.True(t, requeue) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + + require.NotNil(t, machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice]) + // TODO when we can ensure default ip comes first: require.Equal(t, defaultIP, machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv4[0]) + require.ElementsMatch(t, []string{defaultIP, "10.50.10.10"}, machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv4) + require.Nil(t, machineScope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv6) + require.NotNil(t, machineScope.ProxmoxMachine.Status.IPAddresses["net1"]) + require.Equal(t, infrav1.IPAddresses{IPv4: []string{"10.100.10.10"}, 
IPv6: nil}, *(machineScope.ProxmoxMachine.Status.IPAddresses["net1"])) + + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } diff --git a/internal/service/vmservice/power.go b/internal/service/vmservice/power.go index a47453e5..3315f05b 100644 --- a/internal/service/vmservice/power.go +++ b/internal/service/vmservice/power.go @@ -25,25 +25,31 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" capmox "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) func reconcilePowerState(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { - if !machineHasIPAddress(machineScope.ProxmoxMachine) { - machineScope.V(4).Info("ip address not set for machine") - // machine doesn't have an ip address yet - // needs to reconcile again - return true, nil + if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.WaitingForVMPowerUpReason { + // Machine is in the wrong state to reconcile, we only reconcile machines waiting to power on + return false, nil } + /* + if !machineHasIPAddress(machineScope.ProxmoxMachine) { + machineScope.V(4).Info("ip address not set for machine") + // machine doesn't have an ip address yet + // needs to reconcile again + return true, nil + } + */ + machineScope.V(4).Info("ensuring machine is started") - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.PoweringOnReason, clusterv1.ConditionSeverityInfo, "") t, err := startVirtualMachine(ctx, machineScope.InfraCluster.ProxmoxClient, machineScope.VirtualMachine) if err != nil { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, 
infrav1alpha1.PoweringOnFailedReason, clusterv1.ConditionSeverityInfo, "%s", err) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.PoweringOnFailedReason, clusterv1.ConditionSeverityInfo, "%s", err) return false, err } @@ -52,6 +58,7 @@ func reconcilePowerState(ctx context.Context, machineScope *scope.MachineScope) return true, nil } + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForCloudInitReason, clusterv1.ConditionSeverityInfo, "") return false, nil } diff --git a/internal/service/vmservice/power_test.go b/internal/service/vmservice/power_test.go index 311dad70..a49c0a2c 100644 --- a/internal/service/vmservice/power_test.go +++ b/internal/service/vmservice/power_test.go @@ -22,22 +22,24 @@ import ( "github.com/stretchr/testify/require" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) +/* Todo: Test is useless because of state machine func TestReconcilePowerState_MissingIPAddress(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVMPowerUpReason) requeue, err := reconcilePowerState(context.TODO(), machineScope) require.True(t, requeue) require.NoError(t, err) require.Nil(t, machineScope.ProxmoxMachine.Status.TaskRef) } +*/ func TestReconcilePowerState_SetTaskRef(t *testing.T) { ctx := context.TODO() - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVMPowerUpReason) + machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} vm := newStoppedVM() 
task := newTask() @@ -52,7 +54,7 @@ func TestReconcilePowerState_SetTaskRef(t *testing.T) { func TestStartVirtualMachine_Paused(t *testing.T) { ctx := context.TODO() - _, proxmoxClient, _ := setupReconcilerTest(t) + _, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVMPowerUpReason) vm := newPausedVM() proxmoxClient.EXPECT().ResumeVM(ctx, vm).Return(newTask(), nil).Once() @@ -63,7 +65,7 @@ func TestStartVirtualMachine_Paused(t *testing.T) { func TestStartVirtualMachine_Stopped(t *testing.T) { ctx := context.TODO() - _, proxmoxClient, _ := setupReconcilerTest(t) + _, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVMPowerUpReason) vm := newStoppedVM() proxmoxClient.EXPECT().StartVM(ctx, vm).Return(newTask(), nil).Once() @@ -74,7 +76,7 @@ func TestStartVirtualMachine_Stopped(t *testing.T) { func TestStartVirtualMachine_Hibernated(t *testing.T) { ctx := context.TODO() - _, proxmoxClient, _ := setupReconcilerTest(t) + _, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVMPowerUpReason) vm := newHibernatedVM() proxmoxClient.EXPECT().StartVM(ctx, vm).Return(newTask(), nil).Once() @@ -84,7 +86,7 @@ func TestStartVirtualMachine_Hibernated(t *testing.T) { } func TestStartVirtualMachine_Started(t *testing.T) { - _, proxmoxClient, _ := setupReconcilerTest(t) + _, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVMPowerUpReason) vm := newRunningVM() task, err := startVirtualMachine(context.TODO(), proxmoxClient, vm) diff --git a/internal/service/vmservice/utils.go b/internal/service/vmservice/utils.go index 394b2a18..f71b0536 100644 --- a/internal/service/vmservice/utils.go +++ b/internal/service/vmservice/utils.go @@ -17,25 +17,50 @@ limitations under the License. 
package vmservice import ( + "context" "fmt" "regexp" "strconv" "strings" "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope" ) const ( - // DefaultNetworkDeviceIPV4 is the default network device name for ipv4. - DefaultNetworkDeviceIPV4 = "net0-inet" + // DefaultNetworkDeviceIPv4 is the default network device name for ipv4. + DefaultNetworkDeviceIPv4 = "net0-inet" - // DefaultNetworkDeviceIPV6 is the default network device name for ipv6. - DefaultNetworkDeviceIPV6 = "net0-inet6" + // DefaultNetworkDeviceIPv6 is the default network device name for ipv6. + DefaultNetworkDeviceIPv6 = "net0-inet6" ) +func GetInClusterIPPoolsFromMachine(ctx context.Context, machineScope *scope.MachineScope) (*[]corev1.TypedLocalObjectReference, error) { + pools, _ := machineScope.IPAMHelper.GetInClusterPools(ctx, machineScope.ProxmoxMachine) + + ret := []corev1.TypedLocalObjectReference{} + + // TODO: move one function upwards + for _, pool := range []*ipamicv1.InClusterIPPool{pools["ipv4"], pools["ipv6"]} { + if pool != nil { + poolRef := corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(ipamicv1.GroupVersion.String()), + Name: pool.Name, + Kind: pool.TypeMeta.Kind, + } + ret = append(ret, poolRef) + } + } + + return &ret, nil +} + func extractUUID(input string) string { pattern := `(^|,)uuid=([0-9a-fA-F-]+)` @@ -79,30 +104,30 @@ func extractNetworkBridge(input string) string { } // extractNetworkMTU returns the mtu out of net device input e.g. virtio=A6:23:64:4D:84:CB,bridge=vmbr1,mtu=1500. 
-func extractNetworkMTU(input string) uint16 { +func extractNetworkMTU(input string) int32 { re := regexp.MustCompile(`mtu=(\d+)`) match := re.FindStringSubmatch(input) if len(match) > 1 { - mtu, err := strconv.ParseUint(match[1], 10, 16) + mtu, err := strconv.ParseInt(match[1], 10, 32) if err != nil { return 0 } - return uint16(mtu) + return int32(mtu) } return 0 } // extractNetworkVLAN returns the vlan out of net device input e.g. virtio=A6:23:64:4D:84:CB,bridge=vmbr1,mtu=1500,tag=100. -func extractNetworkVLAN(input string) uint16 { +func extractNetworkVLAN(input string) int32 { re := regexp.MustCompile(`tag=(\d+)`) match := re.FindStringSubmatch(input) if len(match) > 1 { - vlan, err := strconv.ParseUint(match[1], 10, 16) + vlan, err := strconv.ParseInt(match[1], 10, 32) if err != nil { return 0 } - return uint16(vlan) + return int32(vlan) } return 0 @@ -116,41 +141,9 @@ func shouldUpdateNetworkDevices(machineScope *scope.MachineScope) bool { nets := machineScope.VirtualMachine.VirtualMachineConfig.MergeNets() - if machineScope.ProxmoxMachine.Spec.Network.Default != nil { - net0 := nets[infrav1alpha1.DefaultNetworkDevice] - if net0 == "" { - return true - } - - desiredDefault := *machineScope.ProxmoxMachine.Spec.Network.Default - - model := extractNetworkModel(net0) - bridge := extractNetworkBridge(net0) - - if model != *desiredDefault.Model || bridge != desiredDefault.Bridge { - return true - } - - if desiredDefault.MTU != nil { - mtu := extractNetworkMTU(net0) - - if mtu != *desiredDefault.MTU { - return true - } - } - - if desiredDefault.VLAN != nil { - vlan := extractNetworkVLAN(net0) - - if vlan != *desiredDefault.VLAN { - return true - } - } - } - - devices := machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices + devices := machineScope.ProxmoxMachine.Spec.Network.NetworkDevices for _, v := range devices { - net := nets[v.Name] + net := nets[ptr.Deref(v.Name, infrav1.DefaultNetworkDevice)] // device is empty. 
if len(net) == 0 { return true @@ -160,7 +153,7 @@ func shouldUpdateNetworkDevices(machineScope *scope.MachineScope) bool { bridge := extractNetworkBridge(net) // current is different from the desired spec. - if model != *v.Model || bridge != v.Bridge { + if model != *v.Model || bridge != *v.Bridge { return true } @@ -186,7 +179,7 @@ func shouldUpdateNetworkDevices(machineScope *scope.MachineScope) bool { // formatNetworkDevice formats a network device config // example 'virtio,bridge=vmbr0,tag=100'. -func formatNetworkDevice(model, bridge string, mtu *uint16, vlan *uint16) string { +func formatNetworkDevice(model, bridge string, mtu *int32, vlan *int32) string { var components = []string{model, fmt.Sprintf("bridge=%s", bridge)} if mtu != nil { diff --git a/internal/service/vmservice/utils_test.go b/internal/service/vmservice/utils_test.go index cf808990..fcbc07f1 100644 --- a/internal/service/vmservice/utils_test.go +++ b/internal/service/vmservice/utils_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/utils/ptr" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) func TestExtractUUID(t *testing.T) { @@ -126,7 +126,7 @@ func TestExtractNetworkBridge(t *testing.T) { func TestExtractNetworkMTU(t *testing.T) { type match struct { test string - expected uint16 + expected int32 } goodstrings := []match{ @@ -151,7 +151,7 @@ func TestExtractNetworkMTU(t *testing.T) { for _, s := range badstrings { mtu := extractNetworkMTU(s) - require.Equal(t, uint16(0), mtu) + require.Equal(t, int32(0), mtu) } } @@ -163,8 +163,8 @@ func TestShouldUpdateNetworkDevices_NoNetworkConfig(t *testing.T) { func TestShouldUpdateNetworkDevices_MissingDefaultDeviceOnVM(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr1", 
Model: ptr.To("virtio")}, + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{{Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio")}}, } machineScope.SetVirtualMachine(newStoppedVM()) @@ -173,8 +173,8 @@ func TestShouldUpdateNetworkDevices_MissingDefaultDeviceOnVM(t *testing.T) { func TestShouldUpdateNetworkDevices_DefaultDeviceNeedsUpdate(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")}, + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{{Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio")}}, } machineScope.SetVirtualMachine(newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0")) @@ -183,9 +183,9 @@ func TestShouldUpdateNetworkDevices_DefaultDeviceNeedsUpdate(t *testing.T) { func TestShouldUpdateNetworkDevices_MissingAdditionalDeviceOnVM(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")}}, + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net1"), Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio")}, }, } machineScope.SetVirtualMachine(newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0")) @@ -195,9 +195,9 @@ func TestShouldUpdateNetworkDevices_MissingAdditionalDeviceOnVM(t *testing.T) { func TestShouldUpdateNetworkDevices_AdditionalDeviceNeedsUpdate(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", NetworkDevice: 
infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")}}, + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net1"), Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio")}, }, } machineScope.SetVirtualMachine(newVMWithNets("", "virtio=A6:23:64:4D:84:CB,bridge=vmbr0")) @@ -207,10 +207,10 @@ func TestShouldUpdateNetworkDevices_AdditionalDeviceNeedsUpdate(t *testing.T) { func TestShouldUpdateNetworkDevices_NoUpdate(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0", Model: ptr.To("virtio"), MTU: ptr.To(uint16(1500))}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - {Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), MTU: ptr.To(uint16(1500))}}, + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Bridge: ptr.To("vmbr0"), Model: ptr.To("virtio"), MTU: ptr.To(int32(1500))}, + {Name: ptr.To("net1"), Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio"), MTU: ptr.To(int32(1500))}, }, } machineScope.SetVirtualMachine(newVMWithNets("virtio=A6:23:64:4D:84:CD,bridge=vmbr0,mtu=1500", "virtio=A6:23:64:4D:84:CD,bridge=vmbr1,mtu=1500")) @@ -221,7 +221,7 @@ func TestShouldUpdateNetworkDevices_NoUpdate(t *testing.T) { func TestExtractNetworkVLAN(t *testing.T) { type match struct { test string - expected uint16 + expected int32 } goodstrings := []match{ @@ -246,14 +246,14 @@ func TestExtractNetworkVLAN(t *testing.T) { for _, s := range badstrings { vlan := extractNetworkVLAN(s) - require.Equal(t, uint16(0), vlan) + require.Equal(t, int32(0), vlan) } } func TestShouldUpdateNetworkDevices_VLANChanged(t *testing.T) { machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: 
&infrav1alpha1.NetworkDevice{Bridge: "vmbr0", Model: ptr.To("virtio"), VLAN: ptr.To(uint16(100))}, + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{{Bridge: ptr.To("vmbr0"), Model: ptr.To("virtio"), VLAN: ptr.To(int32(100))}}, } machineScope.SetVirtualMachine(newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0,tag=101")) diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 5b2ef743..7a11cf09 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/inject" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/scheduler" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/taskservice" @@ -58,11 +58,11 @@ var ErrNoVMIDInRangeFree = errors.New("No free vmid found in vmIDRange") // 2. Updating the VM with the bootstrap data, such as the cloud-init meta and user data, before... // 3. Powering on the VM, and finally... // 4. Returning the real-time state of the VM to the caller -func ReconcileVM(ctx context.Context, scope *scope.MachineScope) (infrav1alpha1.VirtualMachine, error) { +func ReconcileVM(ctx context.Context, scope *scope.MachineScope) (infrav1.VirtualMachine, error) { // Initialize the result. - vm := infrav1alpha1.VirtualMachine{ + vm := infrav1.VirtualMachine{ Name: scope.Name(), - State: infrav1alpha1.VirtualMachineStatePending, + State: infrav1.VirtualMachineStatePending, } // If there is an in-flight task associated with this VM then do not @@ -70,37 +70,55 @@ func ReconcileVM(ctx context.Context, scope *scope.MachineScope) (infrav1alpha1. 
if inFlight, err := taskservice.ReconcileInFlightTask(ctx, scope); err != nil || inFlight { return vm, err } + scope.Logger.V(4).Info("proxmox machine state", "state", conditions.GetReason(scope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + // TODO: This requires a proper state machine. We're reusing + // the condition reasons in VMProvisionedConditions as a state machine + // for convenience, but this definitely needs to be refactored. if requeue, err := ensureVirtualMachine(ctx, scope); err != nil || requeue { return vm, err - } + } // VMProvisionedCondition reason is infrav1.CloningReason if requeue, err := reconcileVirtualMachineConfig(ctx, scope); err != nil || requeue { + scope.Logger.V(4).Info("after reconcileVirtualMachineConfig", "machineName", scope.ProxmoxMachine.GetName(), "requeue", requeue, "err", err) return vm, err - } + } // VMProvisionedCondition reason is infrav1.WaitingForDiskReconcilationReason if err := reconcileDisks(ctx, scope); err != nil { + scope.Logger.V(4).Info("after reconcileDisks", "machineName", scope.ProxmoxMachine.GetName(), "err", err) return vm, err - } + } // VMProvisionedCondition reason is infrav1.WaitingForStaticIPAllocationReason if requeue, err := reconcileIPAddresses(ctx, scope); err != nil || requeue { + scope.Logger.V(4).Info("after reconcileIPAddresses", "machineName", scope.ProxmoxMachine.GetName(), "requeue", requeue, "err", err) return vm, err - } + } // VMProvisionedCondition reason is infrav1.WaitingForBootstrapDataReason if requeue, err := reconcileBootstrapData(ctx, scope); err != nil || requeue { + scope.Logger.V(4).Info("after reconcileBootstrapData", "machineName", scope.ProxmoxMachine.GetName(), "requeue", requeue, "err", err) return vm, err - } + } // VMProvisionedCondition reason is infrav1.WaitingForVMPowerUpReason if requeue, err := reconcilePowerState(ctx, scope); err != nil || requeue { + scope.Logger.V(4).Info("after reconcilePowerState", "machineName", scope.ProxmoxMachine.GetName(), "requeue",
requeue, "err", err) return vm, err - } + } // VMProvisionedCondition reason is infrav1.WaitingForClusterAPIMachineAddressesReason if err := reconcileMachineAddresses(scope); err != nil { + scope.Logger.V(4).Info("after reconcileMachineAddresses", "machineName", scope.ProxmoxMachine.GetName(), "err", err) return vm, err - } + } // VMProvisionedCondition reason is infrav1.WaitingForCloudInitReason if requeue, err := checkCloudInitStatus(ctx, scope); err != nil || requeue { + scope.Logger.V(4).Info("after checkCloudInitStatus", "machineName", scope.ProxmoxMachine.GetName(), "requeue", requeue, "err", err) return vm, err + } // VMProvisionedCondition reason is infrav1.WaitingForBootstrapReadyReason + + // handle invalid state of the machine + if conditions.GetReason(scope.ProxmoxMachine, infrav1.VMProvisionedCondition) == infrav1.VMProvisionFailedReason { + scope.Logger.V(4).Info("invalid proxmoxmachine state", "state", conditions.GetReason(scope.ProxmoxMachine, infrav1.VMProvisionedCondition)) + // If you end up here, please file a bug report. + return vm, errors.New("invalid state (failed and no error)") } // if the root machine is ready, we can assume that the VM is ready as well. @@ -109,16 +127,17 @@ func ReconcileVM(ctx context.Context, scope *scope.MachineScope) (infrav1alpha1. if err := unmountCloudInitISO(ctx, scope); err != nil { return vm, errors.Wrapf(err, "failed to unmount cloud-init iso for vm %s", scope.Name()) } - } + } // State Machine is finished + scope.Logger.V(4).Info("condition", "condition", conditions.GetReason(scope.ProxmoxMachine, infrav1.VMProvisionedCondition)) - vm.State = infrav1alpha1.VirtualMachineStateReady + vm.State = infrav1.VirtualMachineStateReady return vm, nil } func checkCloudInitStatus(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { - if !machineScope.VirtualMachine.IsRunning() { - // skip if the vm is not running. 
- return true, nil + if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.WaitingForCloudInitReason { + // Machine is in the wrong state to reconcile, we only reconcile machines waiting for cloud init + return false, nil } if !machineScope.SkipQemuGuestCheck() { @@ -127,13 +146,14 @@ func checkCloudInitStatus(ctx context.Context, machineScope *scope.MachineScope) } } + // TODO: Is there a status for Ignition? if !machineScope.SkipCloudInitCheck() { if running, err := machineScope.InfraCluster.ProxmoxClient.CloudInitStatus(ctx, machineScope.VirtualMachine); err != nil || running { if running { return true, nil } if errors.Is(goproxmox.ErrCloudInitFailed, err) { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) machineScope.SetFailureMessage(err) machineScope.SetFailureReason(capierrors.MachineStatusError("BootstrapFailed")) } @@ -141,6 +161,7 @@ func checkCloudInitStatus(ctx context.Context, machineScope *scope.MachineScope) } } + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapReadyReason, clusterv1.ConditionSeverityInfo, "") return false, nil } @@ -150,8 +171,20 @@ func ensureVirtualMachine(ctx context.Context, machineScope *scope.MachineScope) if machineScope.ProxmoxMachine.Status.TaskRef != nil { return true, nil } + + // Initialize the state machine for proxmox machine deployment. + // NOTE: We are setting this condition only in case it does not exist, so we avoid to get flickering LastConditionTime + // in case of cloning errors or powering on errors. 
+ if !conditions.Has(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) || + conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) == infrav1.WaitingForClusterInfrastructureReason || + conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) == infrav1.WaitingForBootstrapDataReason { + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.CloningReason, clusterv1.ConditionSeverityInfo, "") + } + // Before going further, we need the VM's managed object reference. vmRef, err := FindVM(ctx, machineScope) + + // TODO: Codeflow if err != nil { switch { case errors.Is(err, ErrVMNotFound): @@ -167,17 +200,10 @@ func ensureVirtualMachine(ctx context.Context, machineScope *scope.MachineScope) return false, err } - // Otherwise, this is a new machine and the VM should be created. - // NOTE: We are setting this condition only in case it does not exist, so we avoid to get flickering LastConditionTime - // in case of cloning errors or powering on errors. - if !conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.CloningReason, clusterv1.ConditionSeverityInfo, "") - } - // Create the VM. 
resp, err := createVM(ctx, machineScope) if err != nil { - conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.CloningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err) + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason, clusterv1.ConditionSeverityWarning, "%s", err) return false, err } machineScope.Logger.V(4).Info("Task created", "taskID", resp.Task.ID) @@ -186,6 +212,7 @@ func ensureVirtualMachine(ctx context.Context, machineScope *scope.MachineScope) machineScope.ProxmoxMachine.Status.TaskRef = ptr.To(string(resp.Task.UPID)) machineScope.SetVirtualMachineID(resp.NewID) + // requeue until cloning is finished return true, nil } @@ -196,35 +223,46 @@ func ensureVirtualMachine(ctx context.Context, machineScope *scope.MachineScope) // setting the VirtualMachine object for completing the reconciliation. machineScope.SetVirtualMachine(vmRef) + // at this point the VM is found, so err must be nil return false, nil } func reconcileDisks(ctx context.Context, machineScope *scope.MachineScope) error { - machineScope.V(4).Info("reconciling disks") - disks := machineScope.ProxmoxMachine.Spec.Disks - if disks == nil { - // nothing to do + if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.WaitingForDiskReconcilationReason { + // Machine is in the wrong state to reconcile, we only reconcile machines waiting for disk reconciliation return nil } - vm := machineScope.VirtualMachine - if vm.IsRunning() || machineScope.ProxmoxMachine.Status.Ready { - // We only want to do this before the machine was started or is ready - return nil - } + machineScope.V(4).Info("reconciling disks") + disks := machineScope.ProxmoxMachine.Spec.Disks + + if disks != nil { + vm := machineScope.VirtualMachine + if vm.IsRunning() || ptr.Deref(machineScope.ProxmoxMachine.Status.Ready, false) { + // We only want to do this before the machine was started or is ready + 
return nil + } - if bv := disks.BootVolume; bv != nil { - if _, err := machineScope.InfraCluster.ProxmoxClient.ResizeDisk(ctx, vm, bv.Disk, bv.FormatSize()); err != nil { - machineScope.Error(err, "unable to set disk size", "vm", machineScope.VirtualMachine.VMID) - return err + if bv := disks.BootVolume; bv != nil { + if _, err := machineScope.InfraCluster.ProxmoxClient.ResizeDisk(ctx, vm, bv.Disk, bv.FormatSize()); err != nil { + machineScope.Error(err, "unable to set disk size", "vm", machineScope.VirtualMachine.VMID) + return err + } } } + // Machine is now waiting for IPAddress Allocations, move State Machine along + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityInfo, "") return nil } func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.MachineScope) (requeue bool, err error) { - if machineScope.VirtualMachine.IsRunning() || machineScope.ProxmoxMachine.Status.Ready { + if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.CloningReason { + // Machine is in the wrong state to reconcile, we only reconcile Cloning VMs. 
+ return false, nil + } + + if machineScope.VirtualMachine.IsRunning() || ptr.Deref(machineScope.ProxmoxMachine.Status.Ready, false) { // We only want to do this before the machine was started or is ready return false, nil } @@ -233,14 +271,17 @@ func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.Mach // CPU & Memory var vmOptions []proxmox.VirtualMachineOption - if value := machineScope.ProxmoxMachine.Spec.NumSockets; value > 0 && vmConfig.Sockets != int(value) { - vmOptions = append(vmOptions, proxmox.VirtualMachineOption{Name: optionSockets, Value: value}) + sockets := ptr.Deref(machineScope.ProxmoxMachine.Spec.NumSockets, 0) + cores := ptr.Deref(machineScope.ProxmoxMachine.Spec.NumCores, 0) + memory := ptr.Deref(machineScope.ProxmoxMachine.Spec.MemoryMiB, 0) + if sockets > 0 && vmConfig.Sockets != int(sockets) { + vmOptions = append(vmOptions, proxmox.VirtualMachineOption{Name: optionSockets, Value: sockets}) } - if value := machineScope.ProxmoxMachine.Spec.NumCores; value > 0 && vmConfig.Cores != int(value) { - vmOptions = append(vmOptions, proxmox.VirtualMachineOption{Name: optionCores, Value: value}) + if cores > 0 && vmConfig.Cores != int(cores) { + vmOptions = append(vmOptions, proxmox.VirtualMachineOption{Name: optionCores, Value: cores}) } - if value := machineScope.ProxmoxMachine.Spec.MemoryMiB; value > 0 && int32(vmConfig.Memory) != value { - vmOptions = append(vmOptions, proxmox.VirtualMachineOption{Name: optionMemory, Value: value}) + if memory > 0 && int(vmConfig.Memory) != int(memory) { + vmOptions = append(vmOptions, proxmox.VirtualMachineOption{Name: optionMemory, Value: memory}) } // Description @@ -252,23 +293,12 @@ func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.Mach // Network vmbrs. if machineScope.ProxmoxMachine.Spec.Network != nil && shouldUpdateNetworkDevices(machineScope) { - // adding the default network device. 
- vmOptions = append(vmOptions, proxmox.VirtualMachineOption{ - Name: infrav1alpha1.DefaultNetworkDevice, - Value: formatNetworkDevice( - *machineScope.ProxmoxMachine.Spec.Network.Default.Model, - machineScope.ProxmoxMachine.Spec.Network.Default.Bridge, - machineScope.ProxmoxMachine.Spec.Network.Default.MTU, - machineScope.ProxmoxMachine.Spec.Network.Default.VLAN, - ), - }) - // handing additional network devices. - devices := machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices + devices := machineScope.ProxmoxMachine.Spec.Network.NetworkDevices for _, v := range devices { vmOptions = append(vmOptions, proxmox.VirtualMachineOption{ - Name: v.Name, - Value: formatNetworkDevice(*v.Model, v.Bridge, v.MTU, v.VLAN), + Name: *v.Name, + Value: formatNetworkDevice(*v.Model, *v.Bridge, v.MTU, v.VLAN), }) } } @@ -299,25 +329,30 @@ func reconcileVirtualMachineConfig(ctx context.Context, machineScope *scope.Mach } machineScope.ProxmoxMachine.Status.TaskRef = ptr.To(string(task.UPID)) + + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForDiskReconcilationReason, clusterv1.ConditionSeverityInfo, "") return true, nil } -func reconcileMachineAddresses(scope *scope.MachineScope) error { - addr, err := getMachineAddresses(scope) +func reconcileMachineAddresses(machineScope *scope.MachineScope) error { + if conditions.GetReason(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) != infrav1.WaitingForClusterAPIMachineAddressesReason { + // Machine is in the wrong state to reconcile, we only reconcile powered up VMs + return nil + } + + addr, err := getClusterAPIMachineAddresses(machineScope) if err != nil { - scope.Error(err, "failed to retrieve machine addresses") + machineScope.Error(err, "failed to retrieve machine addresses") return err } - scope.SetAddresses(addr) + machineScope.SetAddresses(addr) + + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, 
infrav1.WaitingForCloudInitReason, clusterv1.ConditionSeverityInfo, "") return nil } -func getMachineAddresses(scope *scope.MachineScope) ([]clusterv1.MachineAddress, error) { - if !machineHasIPAddress(scope.ProxmoxMachine) { - return nil, errors.New("machine does not yet have an ip address") - } - +func getClusterAPIMachineAddresses(scope *scope.MachineScope) ([]clusterv1.MachineAddress, error) { if !scope.VirtualMachine.IsRunning() { return nil, errors.New("unable to apply configuration as long as the virtual machine is not running") } @@ -329,17 +364,18 @@ func getMachineAddresses(scope *scope.MachineScope) ([]clusterv1.MachineAddress, }, } + // TODO: DHCP as InternalIP if scope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil { addresses = append(addresses, clusterv1.MachineAddress{ Type: clusterv1.MachineInternalIP, - Address: scope.ProxmoxMachine.Status.IPAddresses[infrav1alpha1.DefaultNetworkDevice].IPV4, + Address: scope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv4[0], // TODO: support multiple addresses instead of only the first }) } if scope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil { addresses = append(addresses, clusterv1.MachineAddress{ Type: clusterv1.MachineInternalIP, - Address: scope.ProxmoxMachine.Status.IPAddresses[infrav1alpha1.DefaultNetworkDevice].IPV6, + Address: scope.ProxmoxMachine.Status.IPAddresses[infrav1.DefaultNetworkDevice].IPv6[0], // TODO: support multiple addresses instead of only the first }) } @@ -389,7 +425,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } if scope.InfraCluster.ProxmoxCluster.Status.NodeLocations == nil { - scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1alpha1.NodeLocations) + scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1.NodeLocations) } // if no target was specified but we have a set of nodes defined in the spec, we want to evenly distribute @@ -418,7 +454,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe if errors.Is(err,
goproxmox.ErrTemplateNotFound) { scope.SetFailureMessage(err) scope.SetFailureReason(capierrors.MachineStatusError("VMTemplateNotFound")) - conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) + conditions.MarkFalse(scope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) } return proxmox.VMCloneResponse{}, err } @@ -437,7 +473,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe // if the creation was successful, we store the information about the node in the // cluster status - scope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1alpha1.NodeLocation{ + scope.InfraCluster.ProxmoxCluster.AddNodeLocation(infrav1.NodeLocation{ Machine: corev1.LocalObjectReference{Name: options.Name}, Node: node, }, util.IsControlPlaneMachine(scope.Machine)) diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index cbec7958..33db319f 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -27,8 +27,9 @@ import ( "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/conditions" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/scheduler" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" @@ -36,31 +37,37 @@ import ( ) func TestReconcileVM_EverythingReady(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, 
infrav1.WaitingForCloudInitReason) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + // machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) - machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Status.Ready = ptr.To(true) - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Twice() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, nil).Once() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil).Once() result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) - require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) - require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) + // TODO: check that result is such that requeueing is necessary + + // requeue the VM since it is not fully done with the state transition + result, err = ReconcileVM(context.Background(), machineScope) + require.NoError(t, err) + + require.Equal(t, infrav1.VirtualMachineStateReady, result.State) + // require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } func TestReconcileVM_QemuAgentCheckDisabled(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapReadyReason) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = 
map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + // machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) - machineScope.ProxmoxMachine.Status.Ready = true - machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ + machineScope.ProxmoxMachine.Status.Ready = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Checks = &infrav1.ProxmoxMachineChecks{ SkipQemuGuestAgent: ptr.To(true), } @@ -69,38 +76,44 @@ func TestReconcileVM_QemuAgentCheckDisabled(t *testing.T) { result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) - require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) - require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) + require.Equal(t, infrav1.VirtualMachineStateReady, result.State) + // require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } func TestReconcileVM_CloudInitCheckDisabled(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForCloudInitReason) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + // machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) - machineScope.ProxmoxMachine.Status.Ready = true - machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ + machineScope.ProxmoxMachine.Status.Ready = ptr.To(true) + 
machineScope.ProxmoxMachine.Spec.Checks = &infrav1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), } - proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() + proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Twice() proxmoxClient.EXPECT().QemuAgentStatus(context.Background(), vm).Return(nil) result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) - require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) - require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) + // TODO: check that result is such that requeueing is necessary + + // requeue the VM since it is not fully done with the state transition + result, err = ReconcileVM(context.Background(), machineScope) + require.NoError(t, err) + + require.Equal(t, infrav1.VirtualMachineStateReady, result.State) + // require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } func TestReconcileVM_InitCheckDisabled(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapReadyReason) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + // machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) - machineScope.ProxmoxMachine.Status.Ready = true - machineScope.ProxmoxMachine.Spec.Checks = &infrav1alpha1.ProxmoxMachineChecks{ + machineScope.ProxmoxMachine.Status.Ready = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Checks = &infrav1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), 
SkipQemuGuestAgent: ptr.To(true), } @@ -109,14 +122,14 @@ func TestReconcileVM_InitCheckDisabled(t *testing.T) { result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) - require.Equal(t, infrav1alpha1.VirtualMachineStateReady, result.State) - require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) + require.Equal(t, infrav1.VirtualMachineStateReady, result.State) + // require.Equal(t, "10.10.10.10", machineScope.ProxmoxMachine.Status.Addresses[1].Address) } func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") @@ -142,22 +155,22 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T) { vmTemplateTags := []string{"foo", "bar"} - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ - TemplateSource: infrav1alpha1.TemplateSource{ - TemplateSelector: 
&infrav1alpha1.TemplateSelector{ + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1.VirtualMachineCloneSpec{ + TemplateSource: infrav1.TemplateSource{ + TemplateSelector: &infrav1.TemplateSelector{ MatchTags: vmTemplateTags, }, }, } machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") @@ -186,23 +199,23 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNotFound(t *testing.T) { ctx := context.Background() vmTemplateTags := []string{"foo", "bar"} - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ - TemplateSource: infrav1alpha1.TemplateSource{ - TemplateSelector: &infrav1alpha1.TemplateSelector{ + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1.VirtualMachineCloneSpec{ + TemplateSource: infrav1.TemplateSource{ + TemplateSelector: &infrav1.TemplateSelector{ MatchTags: vmTemplateTags, }, 
}, } machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1.TargetStorageFormatRaw) machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") @@ -220,7 +233,7 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNo } func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3"} selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { @@ -238,11 +251,11 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { require.Equal(t, "node3", *machineScope.ProxmoxMachine.Status.ProxmoxNode) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3", "node4"} machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node1", "node2"} @@ -261,11 +274,11 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode) 
require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1"} selectNextNode = func(context.Context, *scope.MachineScope) (string, error) { @@ -277,13 +290,13 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. require.Error(t, err) require.False(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) require.True(t, machineScope.HasFailed()) } func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) + machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1.VMIDRange{ Start: 1000, End: 1002, } @@ -300,12 +313,12 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { require.Equal(t, int64(1001), machineScope.ProxmoxMachine.GetVirtualMachineID()) require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) - requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition) } func 
TestEnsureVirtualMachine_CreateVM_VMIDRangeExhausted(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) + machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1.VMIDRange{ Start: 1000, End: 1002, } @@ -321,8 +334,8 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeExhausted(t *testing.T) { } func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { - machineScope, proxmoxClient, kubeClient := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ + machineScope, proxmoxClient, kubeClient := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) + machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1.VMIDRange{ Start: 1000, End: 1002, } @@ -334,13 +347,13 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { vm.Name = "vm1000" proxmoxClient.EXPECT().GetVM(context.Background(), "", int64(1000)).Return(vm, nil).Once() proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil).Once() - infraMachine := infrav1alpha1.ProxmoxMachine{ + infraMachine := infrav1.ProxmoxMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "vm1000", }, - Spec: infrav1alpha1.ProxmoxMachineSpec{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ VirtualMachineID: ptr.To(int64(1000)), - }, + }), } machine := clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ @@ -380,7 +393,7 @@ func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { } func TestEnsureVirtualMachine_FindVM(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) machineScope.SetVirtualMachineID(123) vm := newStoppedVM() 
vm.VirtualMachineConfig.SMBios1 = "uuid=56603c36-46b9-4608-90ae-c731c15eae64" @@ -396,7 +409,7 @@ func TestEnsureVirtualMachine_FindVM(t *testing.T) { } func TestEnsureVirtualMachine_UpdateVMLocation_Error(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) machineScope.SetVirtualMachineID(123) proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(nil, fmt.Errorf("not found")).Once() @@ -407,7 +420,7 @@ func TestEnsureVirtualMachine_UpdateVMLocation_Error(t *testing.T) { } func TestReconcileVirtualMachineConfig_NoConfig(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) vm := newStoppedVM() vm.VirtualMachineConfig.Description = machineScope.ProxmoxMachine.GetName() machineScope.SetVirtualMachine(vm) @@ -418,18 +431,15 @@ func TestReconcileVirtualMachineConfig_NoConfig(t *testing.T) { } func TestReconcileVirtualMachineConfig_ApplyConfig(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.CloningReason) machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") - machineScope.ProxmoxMachine.Spec.NumSockets = 4 - machineScope.ProxmoxMachine.Spec.NumCores = 4 - machineScope.ProxmoxMachine.Spec.MemoryMiB = 16 * 1024 - machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0", Model: ptr.To("virtio"), MTU: ptr.To(uint16(1500))}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - { - Name: "net1", - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), MTU: ptr.To(uint16(1500))}, - }, + machineScope.ProxmoxMachine.Spec.NumSockets = ptr.To(int32(4)) + 
machineScope.ProxmoxMachine.Spec.NumCores = ptr.To(int32(4)) + machineScope.ProxmoxMachine.Spec.MemoryMiB = ptr.To(int32(16 * 1024)) + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0"), Bridge: ptr.To("vmbr0"), Model: ptr.To("virtio"), MTU: ptr.To(int32(1500))}, + {Name: ptr.To("net1"), Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio"), MTU: ptr.To(int32(1500))}, }, } @@ -437,12 +447,12 @@ func TestReconcileVirtualMachineConfig_ApplyConfig(t *testing.T) { task := newTask() machineScope.SetVirtualMachine(vm) expectedOptions := []interface{}{ - proxmox.VirtualMachineOption{Name: optionSockets, Value: machineScope.ProxmoxMachine.Spec.NumSockets}, - proxmox.VirtualMachineOption{Name: optionCores, Value: machineScope.ProxmoxMachine.Spec.NumCores}, - proxmox.VirtualMachineOption{Name: optionMemory, Value: machineScope.ProxmoxMachine.Spec.MemoryMiB}, + proxmox.VirtualMachineOption{Name: optionSockets, Value: *machineScope.ProxmoxMachine.Spec.NumSockets}, + proxmox.VirtualMachineOption{Name: optionCores, Value: *machineScope.ProxmoxMachine.Spec.NumCores}, + proxmox.VirtualMachineOption{Name: optionMemory, Value: *machineScope.ProxmoxMachine.Spec.MemoryMiB}, proxmox.VirtualMachineOption{Name: optionDescription, Value: machineScope.ProxmoxMachine.Spec.Description}, - proxmox.VirtualMachineOption{Name: "net0", Value: formatNetworkDevice("virtio", "vmbr0", ptr.To(uint16(1500)), nil)}, - proxmox.VirtualMachineOption{Name: "net1", Value: formatNetworkDevice("virtio", "vmbr1", ptr.To(uint16(1500)), nil)}, + proxmox.VirtualMachineOption{Name: "net0", Value: formatNetworkDevice("virtio", "vmbr0", ptr.To(int32(1500)), nil)}, + proxmox.VirtualMachineOption{Name: "net1", Value: formatNetworkDevice("virtio", "vmbr1", ptr.To(int32(1500)), nil)}, } proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expectedOptions...).Return(task, nil).Once() @@ -454,7 +464,7 @@ func 
TestReconcileVirtualMachineConfig_ApplyConfig(t *testing.T) { } func TestReconcileVirtualMachineConfigTags(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.CloningReason) // CASE: Multiple tags machineScope.ProxmoxMachine.Spec.Tags = []string{"tag1", "tag2"} @@ -487,6 +497,9 @@ func TestReconcileVirtualMachineConfigTags(t *testing.T) { proxmoxClient.EXPECT().ConfigureVM(context.Background(), vm, expectedOptions...).Return(task, nil).Once() + // reset stateMachine to before VirtualMachineConfig + conditions.MarkFalse(machineScope.ProxmoxMachine, infrav1.VMProvisionedCondition, infrav1.CloningReason, clusterv1.ConditionSeverityInfo, "") + requeue, err = reconcileVirtualMachineConfig(context.Background(), machineScope) require.NoError(t, err) require.True(t, requeue) @@ -494,9 +507,9 @@ func TestReconcileVirtualMachineConfigTags(t *testing.T) { } func TestReconcileDisks_RunningVM(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ - BootVolume: &infrav1alpha1.DiskSize{Disk: "ide0", SizeGB: 100}, + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForVirtualMachineConfigReason) + machineScope.ProxmoxMachine.Spec.Disks = &infrav1.Storage{ + BootVolume: &infrav1.DiskSize{Disk: "ide0", SizeGB: 100}, } machineScope.SetVirtualMachine(newRunningVM()) @@ -504,9 +517,9 @@ func TestReconcileDisks_RunningVM(t *testing.T) { } func TestReconcileDisks_ResizeDisk(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.Disks = &infrav1alpha1.Storage{ - BootVolume: &infrav1alpha1.DiskSize{Disk: "ide0", SizeGB: 100}, + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForDiskReconcilationReason) + machineScope.ProxmoxMachine.Spec.Disks = &infrav1.Storage{ + BootVolume: &infrav1.DiskSize{Disk: 
"ide0", SizeGB: 100}, } vm := newStoppedVM() machineScope.SetVirtualMachine(vm) @@ -517,12 +530,12 @@ func TestReconcileDisks_ResizeDisk(t *testing.T) { require.NoError(t, reconcileDisks(context.Background(), machineScope)) } -func TestReconcileMachineAddresses_IPV4(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) +func TestReconcileMachineAddresses_IPv4(t *testing.T) { + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForClusterAPIMachineAddressesReason) vm := newRunningVM() machineScope.SetVirtualMachine(vm) machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) require.NoError(t, reconcileMachineAddresses(machineScope)) @@ -530,10 +543,10 @@ func TestReconcileMachineAddresses_IPV4(t *testing.T) { require.Equal(t, machineScope.ProxmoxMachine.Status.Addresses[1].Address, "10.10.10.10") } -func TestReconcileMachineAddresses_IPV6(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) +func TestReconcileMachineAddresses_IPv6(t *testing.T) { + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForClusterAPIMachineAddressesReason) machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config = nil - machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1alpha1.IPConfigSpec{ + machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ Addresses: []string{"2001:db8::/64"}, Prefix: 64, Gateway: "2001:db8::1", @@ -542,7 +555,7 @@ func TestReconcileMachineAddresses_IPV6(t *testing.T) { vm := newRunningVM() machineScope.SetVirtualMachine(vm) machineScope.SetVirtualMachineID(int64(vm.VMID)) - 
machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV6: "2001:db8::2"}} + machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv6: []string{"2001:db8::2"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) require.NoError(t, reconcileMachineAddresses(machineScope)) @@ -551,8 +564,8 @@ func TestReconcileMachineAddresses_IPV6(t *testing.T) { } func TestReconcileMachineAddresses_DualStack(t *testing.T) { - machineScope, _, _ := setupReconcilerTest(t) - machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1alpha1.IPConfigSpec{ + machineScope, _, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForClusterAPIMachineAddressesReason) + machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ Addresses: []string{"2001:db8::/64"}, Prefix: 64, Gateway: "2001:db8::1", @@ -561,7 +574,7 @@ func TestReconcileMachineAddresses_DualStack(t *testing.T) { vm := newRunningVM() machineScope.SetVirtualMachine(vm) machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10", IPV6: "2001:db8::2"}} + machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}, IPv6: []string{"2001:db8::2"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) require.NoError(t, reconcileMachineAddresses(machineScope)) @@ -571,17 +584,14 @@ func TestReconcileMachineAddresses_DualStack(t *testing.T) { } func TestReconcileVirtualMachineConfigVLAN(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) - machineScope.ProxmoxMachine.Spec.NumSockets = 4 - machineScope.ProxmoxMachine.Spec.NumCores = 4 - machineScope.ProxmoxMachine.Spec.MemoryMiB = 16 * 1024 - 
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{ - Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0", Model: ptr.To("virtio"), VLAN: ptr.To(uint16(100))}, - AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{ - { - Name: "net1", - NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), VLAN: ptr.To(uint16(100))}, - }, + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.CloningReason) + machineScope.ProxmoxMachine.Spec.NumSockets = ptr.To(int32(4)) + machineScope.ProxmoxMachine.Spec.NumCores = ptr.To(int32(4)) + machineScope.ProxmoxMachine.Spec.MemoryMiB = ptr.To(int32(16 * 1024)) + machineScope.ProxmoxMachine.Spec.Network = &infrav1.NetworkSpec{ + NetworkDevices: []infrav1.NetworkDevice{ + {Name: ptr.To("net0"), Bridge: ptr.To("vmbr0"), Model: ptr.To("virtio"), VLAN: ptr.To(int32(100))}, + {Name: ptr.To("net1"), Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio"), VLAN: ptr.To(int32(100))}, }, } @@ -589,11 +599,11 @@ func TestReconcileVirtualMachineConfigVLAN(t *testing.T) { task := newTask() machineScope.SetVirtualMachine(vm) expectedOptions := []interface{}{ - proxmox.VirtualMachineOption{Name: optionSockets, Value: machineScope.ProxmoxMachine.Spec.NumSockets}, - proxmox.VirtualMachineOption{Name: optionCores, Value: machineScope.ProxmoxMachine.Spec.NumCores}, - proxmox.VirtualMachineOption{Name: optionMemory, Value: machineScope.ProxmoxMachine.Spec.MemoryMiB}, - proxmox.VirtualMachineOption{Name: "net0", Value: formatNetworkDevice("virtio", "vmbr0", nil, ptr.To(uint16(100)))}, - proxmox.VirtualMachineOption{Name: "net1", Value: formatNetworkDevice("virtio", "vmbr1", nil, ptr.To(uint16(100)))}, + proxmox.VirtualMachineOption{Name: optionSockets, Value: *machineScope.ProxmoxMachine.Spec.NumSockets}, + proxmox.VirtualMachineOption{Name: optionCores, Value: *machineScope.ProxmoxMachine.Spec.NumCores}, + proxmox.VirtualMachineOption{Name: optionMemory, Value: 
*machineScope.ProxmoxMachine.Spec.MemoryMiB}, + proxmox.VirtualMachineOption{Name: "net0", Value: formatNetworkDevice("virtio", "vmbr0", nil, ptr.To(int32(100)))}, + proxmox.VirtualMachineOption{Name: "net1", Value: formatNetworkDevice("virtio", "vmbr1", nil, ptr.To(int32(100)))}, } proxmoxClient.EXPECT().ConfigureVM(context.TODO(), vm, expectedOptions...).Return(task, nil).Once() @@ -605,7 +615,7 @@ func TestReconcileVirtualMachineConfigVLAN(t *testing.T) { } func TestReconcileDisks_UnmountCloudInitISO(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForBootstrapReadyReason) vm := newRunningVM() vm.VirtualMachineConfig.IDE0 = "local:iso/cloud-init.iso,media=cdrom" @@ -617,12 +627,12 @@ func TestReconcileDisks_UnmountCloudInitISO(t *testing.T) { } func TestReconcileVM_CloudInitFailed(t *testing.T) { - machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForCloudInitReason) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) - machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Status.Ready = ptr.To(true) proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(false, goproxmox.ErrCloudInitFailed).Once() @@ -635,12 +645,12 @@ func TestReconcileVM_CloudInitFailed(t *testing.T) { } func TestReconcileVM_CloudInitRunning(t *testing.T) { - machineScope, 
proxmoxClient, _ := setupReconcilerTest(t) + machineScope, proxmoxClient, _ := setupReconcilerTestWithCondition(t, infrav1.WaitingForCloudInitReason) vm := newRunningVM() machineScope.SetVirtualMachineID(int64(vm.VMID)) - machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.10.10.10"}} + machineScope.ProxmoxMachine.Status.IPAddresses = map[string]*infrav1.IPAddresses{infrav1.DefaultNetworkDevice: {IPv4: []string{"10.10.10.10"}}} machineScope.ProxmoxMachine.Status.BootstrapDataProvided = ptr.To(true) - machineScope.ProxmoxMachine.Status.Ready = true + machineScope.ProxmoxMachine.Status.Ready = ptr.To(true) proxmoxClient.EXPECT().GetVM(context.Background(), "node1", int64(123)).Return(vm, nil).Once() proxmoxClient.EXPECT().CloudInitStatus(context.Background(), vm).Return(true, nil).Once() @@ -648,5 +658,5 @@ func TestReconcileVM_CloudInitRunning(t *testing.T) { result, err := ReconcileVM(context.Background(), machineScope) require.NoError(t, err) - require.Equal(t, infrav1alpha1.VirtualMachineStatePending, result.State) + require.Equal(t, infrav1.VirtualMachineStatePending, result.State) } diff --git a/internal/webhook/proxmoxcluster_webhook.go b/internal/webhook/proxmoxcluster_webhook.go index 0aa2f540..f8de0ba7 100644 --- a/internal/webhook/proxmoxcluster_webhook.go +++ b/internal/webhook/proxmoxcluster_webhook.go @@ -32,7 +32,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) var _ admission.CustomValidator = &ProxmoxCluster{} @@ -50,7 +50,7 @@ func (p *ProxmoxCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } 
-//+kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha1-proxmoxcluster,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters,versions=v1alpha1,name=validation.proxmoxcluster.infrastructure.cluster.x-k8s.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha2-proxmoxcluster,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters,versions=v1alpha2,name=validation.proxmoxcluster.infrastructure.cluster.x-k8s.io,admissionReviewVersions=v1 // ValidateCreate implements the creation validation function. func (*ProxmoxCluster) ValidateCreate(_ context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { @@ -96,7 +96,7 @@ func (*ProxmoxCluster) ValidateUpdate(_ context.Context, _ runtime.Object, newOb func validateControlPlaneEndpoint(cluster *infrav1.ProxmoxCluster) error { // Skipping the validation of the Control Plane endpoint in case of externally managed Control Plane: // the Cluster API Control Plane provider will eventually provide the LB. 
- if cluster.Spec.ExternalManagedControlPlane { + if *cluster.Spec.ExternalManagedControlPlane { return nil } @@ -157,7 +157,7 @@ func validateControlPlaneEndpoint(cluster *infrav1.ProxmoxCluster) error { } } - // IPV6 + // IPv6 if cluster.Spec.IPv6Config != nil { set6, err := buildSetFromAddresses(cluster.Spec.IPv6Config.Addresses) if err != nil { diff --git a/internal/webhook/proxmoxcluster_webhook_test.go b/internal/webhook/proxmoxcluster_webhook_test.go index a79009f9..e99cfac4 100644 --- a/internal/webhook/proxmoxcluster_webhook_test.go +++ b/internal/webhook/proxmoxcluster_webhook_test.go @@ -25,7 +25,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) var _ = Describe("Controller Test", func() { @@ -68,25 +68,25 @@ var _ = Describe("Controller Test", func() { g.Expect(k8sClient.Create(testEnv.GetContext(), &cluster)).To(Succeed()) }) - It("should allow valid endpoint IP4", func() { - cluster := validProxmoxCluster("succeed-test-cluster-with-ip4") + It("should allow valid IPv4 endpoint", func() { + cluster := validProxmoxCluster("succeed-test-cluster-with-ipv4") cluster.Spec.ControlPlaneEndpoint.Host = "127.0.0.1" g.Expect(k8sClient.Create(testEnv.GetContext(), &cluster)).To(Succeed()) }) - It("should allow valid endpoint IP6", func() { - cluster := validProxmoxCluster("succeed-test-cluster-with-ip6") + It("should allow valid IPv6 endpoint", func() { + cluster := validProxmoxCluster("succeed-test-cluster-with-ipv6") cluster.Spec.ControlPlaneEndpoint.Host = "::1" g.Expect(k8sClient.Create(testEnv.GetContext(), &cluster)).To(Succeed()) }) - It("should disallow invalid IPV4 IPs", func() { + It("should disallow invalid IPv4 addresses", func() { cluster := invalidProxmoxCluster("test-cluster") cluster.Spec.IPv4Config.Addresses = []string{"invalid"} 
g.Expect(k8sClient.Create(testEnv.GetContext(), &cluster)).To(MatchError(ContainSubstring("provided addresses are not valid IP addresses, ranges or CIDRs"))) }) - It("should disallow invalid IPV6 IPs", func() { + It("should disallow invalid IPv6 addresses", func() { cluster := validProxmoxCluster("test-cluster") cluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ Addresses: []string{"invalid"}, diff --git a/internal/webhook/proxmoxmachine_webhook.go b/internal/webhook/proxmoxmachine_webhook.go index d560ed91..5852acb4 100644 --- a/internal/webhook/proxmoxmachine_webhook.go +++ b/internal/webhook/proxmoxmachine_webhook.go @@ -28,7 +28,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" ) // ProxmoxMachine is a type that implements @@ -44,7 +44,7 @@ func (p *ProxmoxMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -//+kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha1-proxmoxmachine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines,versions=v1alpha1,name=validation.proxmoxmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha2-proxmoxmachine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,sideEffects=None,groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines,versions=v1alpha2,name=validation.proxmoxmachine.infrastructure.cluster.x-k8s.io,admissionReviewVersions=v1 // ValidateCreate implements the creation validation function. 
func (p *ProxmoxMachine) ValidateCreate(_ context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { @@ -99,69 +99,36 @@ func validateNetworks(machine *infrav1.ProxmoxMachine) error { gk, name := machine.GroupVersionKind().GroupKind(), machine.GetName() - if machine.Spec.Network != nil && machine.Spec.Network.Default == nil { - return apierrors.NewInvalid( - gk, - name, - field.ErrorList{ - field.Invalid( - field.NewPath("spec", "network", "default"), machine.Spec.Network.Default, "default network device must be set when setting network spec"), - }) - } - - if machine.Spec.Network.Default != nil { - err := validateNetworkDeviceMTU(machine.Spec.Network.Default) + for i := range machine.Spec.Network.NetworkDevices { + err := validateNetworkDeviceMTU(&machine.Spec.Network.NetworkDevices[i]) if err != nil { return apierrors.NewInvalid( gk, name, field.ErrorList{ field.Invalid( - field.NewPath("spec", "network", "default", "mtu"), machine.Spec.Network.Default, err.Error()), + field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "mtu"), machine.Spec.Network.NetworkDevices[i], err.Error()), }) } - } - for i := range machine.Spec.Network.AdditionalDevices { - err := validateIPPoolRef(machine.Spec.Network.AdditionalDevices[i]) + err = validateInterfaceConfigMTU(&machine.Spec.Network.NetworkDevices[i].InterfaceConfig) if err != nil { return apierrors.NewInvalid( gk, name, field.ErrorList{ field.Invalid( - field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "IPPoolConfig"), machine.Spec.Network.AdditionalDevices[i], err.Error()), + field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "linkMtu"), machine.Spec.Network.NetworkDevices[i], err.Error()), }) } - - err = validateNetworkDeviceMTU(&machine.Spec.Network.AdditionalDevices[i].NetworkDevice) + err = validateRoutingPolicy(&machine.Spec.Network.NetworkDevices[i].InterfaceConfig.RoutingPolicy) if err != nil { return apierrors.NewInvalid( gk, name, 
field.ErrorList{ field.Invalid( - field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "mtu"), machine.Spec.Network.AdditionalDevices[i], err.Error()), - }) - } - err = validateInterfaceConfigMTU(&machine.Spec.Network.AdditionalDevices[i].InterfaceConfig) - if err != nil { - return apierrors.NewInvalid( - gk, - name, - field.ErrorList{ - field.Invalid( - field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "linkMtu"), machine.Spec.Network.AdditionalDevices[i], err.Error()), - }) - } - err = validateRoutingPolicy(&machine.Spec.Network.AdditionalDevices[i].InterfaceConfig.RoutingPolicy) - if err != nil { - return apierrors.NewInvalid( - gk, - name, - field.ErrorList{ - field.Invalid( - field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "routingPolicy"), machine.Spec.Network.AdditionalDevices[i], err.Error()), + field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "routingPolicy"), machine.Spec.Network.NetworkDevices[i], err.Error()), }) } } @@ -235,11 +202,3 @@ func validateNetworkDeviceMTU(device *infrav1.NetworkDevice) error { return nil } - -func validateIPPoolRef(net infrav1.AdditionalNetworkDevice) error { - if net.IPv4PoolRef == nil && net.IPv6PoolRef == nil { - return fmt.Errorf("at least one of IPv4PoolRef or IPv6PoolRef must be set") - } - - return nil -} diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index c39bf0fa..2990d5d1 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -19,15 +19,15 @@ package webhook import ( "time" - corev1 "k8s.io/api/core/v1" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" + . "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" ) var _ = Describe("Controller Test", func() { @@ -36,17 +36,17 @@ var _ = Describe("Controller Test", func() { Context("create proxmox machine", func() { It("should disallow invalid network mtu", func() { machine := invalidMTUProxmoxMachine("test-machine") - g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.default.mtu: Invalid value"))) + g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("invalid MTU value"))) }) It("should disallow invalid network vlan", func() { machine := invalidVLANProxmoxMachine("test-machine") - g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.default.vlan: Invalid value"))) + g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.networkDevices[0].vlan: Invalid value"))) }) It("should disallow invalid network mtu for additional device", func() { machine := validProxmoxMachine("test-machine") - machine.Spec.Network.AdditionalDevices[0].MTU = ptr.To(uint16(1000)) + machine.Spec.Network.NetworkDevices[0].MTU = ptr.To(int32(1000)) g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("mtu must be at least 1280 or 1, but was 1000"))) }) @@ -57,13 +57,13 @@ var _ = Describe("Controller Test", func() { It("should disallow invalid network vlan for additional device", func() { machine := validProxmoxMachine("test-machine") - machine.Spec.Network.AdditionalDevices[0].VLAN = ptr.To(uint16(0)) + machine.Spec.Network.NetworkDevices[0].VLAN = 
ptr.To(int32(0)) g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("greater than or equal to 1"))) }) It("should disallow invalid link mtu for additional device", func() { machine := validProxmoxMachine("test-machine") - machine.Spec.Network.AdditionalDevices[0].LinkMTU = ptr.To(uint16(1000)) + machine.Spec.Network.NetworkDevices[0].LinkMTU = ptr.To(int32(1000)) g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("mtu must be at least 1280, but was 1000"))) }) @@ -75,31 +75,9 @@ var _ = Describe("Controller Test", func() { It("should disallow routing policy without table", func() { machine := validProxmoxMachine("test-machine") - machine.Spec.Network.AdditionalDevices[0].InterfaceConfig.Routing.RoutingPolicy[0].Table = nil + machine.Spec.Network.NetworkDevices[0].InterfaceConfig.Routing.RoutingPolicy = []infrav1.RoutingPolicySpec{{}} g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("routing policy [0] requires a table"))) }) - - It("should disallow machine with network spec but without Default device", func() { - machine := validProxmoxMachine("test-machine") - machine.Spec.Network = &infrav1.NetworkSpec{ - AdditionalDevices: []infrav1.AdditionalNetworkDevice{ - { - Name: "net1", - NetworkDevice: infrav1.NetworkDevice{ - Bridge: "vmbr2", - IPPoolConfig: infrav1.IPPoolConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "InClusterIPPool", - Name: "simple-pool", - }, - }, - }, - }, - }, - } - g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("default network device must be set when setting network spec"))) - }) }) Context("update proxmox cluster", func() { @@ -109,12 +87,12 @@ var _ = Describe("Controller Test", func() { g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(Succeed()) g.Expect(k8sClient.Get(testEnv.GetContext(), 
client.ObjectKeyFromObject(&machine), &machine)).To(Succeed()) - machine.Spec.Network.Default.MTU = ptr.To(uint16(50)) + machine.Spec.Network.NetworkDevices[0].MTU = ptr.To(int32(50)) - g.Expect(k8sClient.Update(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.default.mtu: Invalid value"))) - machine.Spec.Network.Default.VLAN = ptr.To(uint16(0)) + g.Expect(k8sClient.Update(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("invalid MTU value"))) + machine.Spec.Network.NetworkDevices[0].VLAN = ptr.To(int32(0)) - g.Expect(k8sClient.Update(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("spec.network.default.vlan: Invalid value"))) + g.Expect(k8sClient.Update(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("invalid MTU value"))) g.Eventually(func(g Gomega) { g.Expect(client.IgnoreNotFound(k8sClient.Delete(testEnv.GetContext(), &machine))).To(Succeed()) @@ -140,16 +118,16 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine { Name: name, Namespace: "default", }, - Spec: infrav1.ProxmoxMachineSpec{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ TemplateSource: infrav1.TemplateSource{ - SourceNode: "pve", - TemplateID: ptr.To[int32](100), + SourceNode: ptr.To("pve"), + TemplateID: ptr.To(int32(100)), }, }, - NumSockets: 1, - NumCores: 1, - MemoryMiB: 1024, + NumSockets: ptr.To(int32(1)), + NumCores: ptr.To(int32(1)), + MemoryMiB: ptr.To(int32(1024)), Disks: &infrav1.Storage{ BootVolume: &infrav1.DiskSize{ Disk: "scsi[0]", @@ -157,69 +135,65 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine { }, }, Network: &infrav1.NetworkSpec{ - Default: &infrav1.NetworkDevice{ - Bridge: "vmbr1", + NetworkDevices: []infrav1.NetworkDevice{{ + Name: ptr.To("net0"), + Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio"), - MTU: ptr.To(uint16(1500)), - VLAN: ptr.To(uint16(100)), - }, - AdditionalDevices: []infrav1.AdditionalNetworkDevice{ - { - 
Name: "net1", - NetworkDevice: infrav1.NetworkDevice{ - Bridge: "vmbr2", - Model: ptr.To("virtio"), - MTU: ptr.To(uint16(1500)), - VLAN: ptr.To(uint16(100)), - IPPoolConfig: infrav1.IPPoolConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - Name: "simple-pool", - Kind: "InClusterIPPool", - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - }, - }, - }, - InterfaceConfig: infrav1.InterfaceConfig{ - Routing: infrav1.Routing{ - RoutingPolicy: []infrav1.RoutingPolicySpec{{ - Table: ptr.To(uint32(665)), - }}, - }, + MTU: ptr.To(int32(1500)), + VLAN: ptr.To(int32(100)), + }, { + Name: ptr.To("net1"), + Bridge: ptr.To("vmbr2"), + Model: ptr.To("virtio"), + MTU: ptr.To(int32(1500)), + VLAN: ptr.To(int32(100)), + InterfaceConfig: infrav1.InterfaceConfig{ + IPPoolRef: []corev1.TypedLocalObjectReference{{ + Name: "simple-pool", + Kind: InClusterIPPool, + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + }}, + Routing: infrav1.Routing{ + RoutingPolicy: []infrav1.RoutingPolicySpec{{ + Table: ptr.To(int32(665)), + }}, }, }, - }, + }}, VirtualNetworkDevices: infrav1.VirtualNetworkDevices{ VRFs: []infrav1.VRFDevice{{ Table: 665, Name: "vrf-green", Routing: infrav1.Routing{ RoutingPolicy: []infrav1.RoutingPolicySpec{{ - Table: ptr.To(uint32(665)), + Table: ptr.To(int32(665)), }}, }}, }, }, }, - }, + }), } } func invalidMTUProxmoxMachine(name string) infrav1.ProxmoxMachine { machine := validProxmoxMachine(name) - machine.Spec.Network.Default = &infrav1.NetworkDevice{ - Bridge: "vmbr1", + machine.Spec.Network.NetworkDevices = []infrav1.NetworkDevice{{ + Name: ptr.To("net0"), + Bridge: ptr.To("vmbr1"), Model: ptr.To("virtio"), - MTU: ptr.To(uint16(50)), - } + MTU: ptr.To(int32(50)), + }} return machine } func invalidVLANProxmoxMachine(name string) infrav1.ProxmoxMachine { machine := validProxmoxMachine(name) - machine.Spec.Network.Default = &infrav1.NetworkDevice{ - Bridge: "vmbr1", + machine.Spec.Network.NetworkDevices = []infrav1.NetworkDevice{{ + Name: ptr.To("net0"), + Bridge: 
ptr.To("vmbr1"), Model: ptr.To("virtio"), - VLAN: ptr.To(uint16(0)), - } + VLAN: ptr.To(int32(0)), + }} return machine } diff --git a/pkg/cloudinit/interfaces.go b/pkg/cloudinit/interfaces.go index 2afa8e43..5bd510df 100644 --- a/pkg/cloudinit/interfaces.go +++ b/pkg/cloudinit/interfaces.go @@ -19,4 +19,5 @@ package cloudinit // Renderer renders cloud-init data. type Renderer interface { Render() ([]byte, error) + Inspect() ([]byte, error) } diff --git a/pkg/cloudinit/metadata.go b/pkg/cloudinit/metadata.go index c79e0d12..258998b6 100644 --- a/pkg/cloudinit/metadata.go +++ b/pkg/cloudinit/metadata.go @@ -16,6 +16,10 @@ limitations under the License. package cloudinit +import ( + "encoding/json" +) + const ( metadataTPl = `instance-id: {{ .InstanceID }} local-hostname: {{ .Hostname }} @@ -55,6 +59,11 @@ func (r *Metadata) Render() (metadata []byte, err error) { return render("metadata", metadataTPl, r.data) } +// Inspect returns a jsonified version for inspection. +func (r *Metadata) Inspect() ([]byte, error) { + return json.Marshal(r.data) +} + func (r *Metadata) validate() error { if r.data.Hostname == "" { return ErrMissingHostname diff --git a/pkg/cloudinit/network.go b/pkg/cloudinit/network.go index c512124c..cae61d0a 100644 --- a/pkg/cloudinit/network.go +++ b/pkg/cloudinit/network.go @@ -17,14 +17,17 @@ limitations under the License. package cloudinit import ( + "encoding/json" "net/netip" + "k8s.io/utils/ptr" + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/types" ) const ( /* network-config template. 
*/ - networkConfigTPl = `network: + networkConfigTpl = `network: version: 2 renderer: networkd ethernets: @@ -86,46 +89,42 @@ const ( {{- end -}} {{- define "routes" }} - {{- if or .Gateway .Gateway6 }} + {{- if or .IPConfigs .Routes }} routes: + {{- range $ipconfig := .IPConfigs }} + {{- if .Gateway }} {{- if .Gateway }} + {{- if is6 .IPAddress }} + - to: '::/0' + {{- else }} - to: 0.0.0.0/0 + {{- end -}} {{- if .Metric }} metric: {{ .Metric }} {{- end }} via: {{ .Gateway }} {{- end }} - {{- if .Gateway6 }} - - to: '::/0' - {{- if .Metric6 }} - metric: {{ .Metric6 }} - {{- end }} - via: '{{ .Gateway6 }}' - {{- end }} - {{- else }} - {{- if .Routes }} - routes: {{- end -}} - {{- end -}} - {{- range $index, $route := .Routes }} + {{- end -}} + {{- if .Routes }} + {{- range $index, $route := .Routes }} - { {{- if $route.To }} "to": "{{$route.To}}", {{ end -}} {{- if $route.Via }} "via": "{{$route.Via}}", {{ end -}} {{- if $route.Metric }} "metric": {{$route.Metric}}, {{ end -}} {{- if $route.Table }} "table": {{$route.Table}}, {{ end -}} } + {{- end -}} + {{- end -}} {{- end -}} {{- end -}} {{- define "ipAddresses" }} - {{- if or .IPAddress .IPV6Address }} + {{- if .IPConfigs }} addresses: - {{- if .IPAddress }} + {{- range $ipconfig := .IPConfigs }} - {{ .IPAddress }} - {{- end }} - {{- if .IPV6Address }} - - '{{ .IPV6Address }}' - {{- end }} - {{- end }} + {{- end }} + {{- end -}} {{- end -}} {{- define "mtu" }} @@ -162,6 +161,12 @@ func NewNetworkConfig(configs []types.NetworkConfigData) *NetworkConfig { return nc } +// Inspect returns a serialized copy of the NetworkData. This is useful when +// wanting to immutably inspect what goes into the renderer. +func (r *NetworkConfig) Inspect() ([]byte, error) { + return json.Marshal(r.data.NetworkConfigData) +} + // Render returns rendered network-config. 
func (r *NetworkConfig) Render() ([]byte, error) { if err := r.validate(); err != nil { @@ -169,19 +174,21 @@ func (r *NetworkConfig) Render() ([]byte, error) { } // render network-config - return render("network-config", networkConfigTPl, r.data) + return render("network-config", networkConfigTpl, r.data) } func (r *NetworkConfig) validate() error { if len(r.data.NetworkConfigData) == 0 { return ErrMissingNetworkConfigData } - metrics := make(map[uint32]*struct { + // TODO: Fix validation + metrics := make(map[int32]*struct { ipv4 bool ipv6 bool }) - for i, d := range r.data.NetworkConfigData { + // for i, d := range r.data.NetworkConfigData { + for _, d := range r.data.NetworkConfigData { // TODO: refactor this when network configuration is unified if d.Type != "ethernet" { err := validRoutes(d.Routes) @@ -195,7 +202,7 @@ func (r *NetworkConfig) validate() error { continue } - if !d.DHCP4 && !d.DHCP6 && len(d.IPAddress) == 0 && len(d.IPV6Address) == 0 { + if !d.DHCP4 && !d.DHCP6 && len(d.IPConfigs) == 0 { return ErrMissingIPAddress } @@ -203,49 +210,39 @@ func (r *NetworkConfig) validate() error { return ErrMissingMacAddress } - if !d.DHCP4 && len(d.IPAddress) > 0 { - err := validIPAddress(d.IPAddress) - if err != nil { - return err - } - if d.Gateway == "" && i == 0 { - return ErrMissingGateway - } - } + for _, c := range d.IPConfigs { + var is6 bool + var err error - if !d.DHCP6 && len(d.IPV6Address) > 0 { - err6 := validIPAddress(d.IPV6Address) - if err6 != nil { - return err6 - } - if d.Gateway6 == "" && i == 0 { - return ErrMissingGateway - } - } - if d.Metric != nil { - if _, exists := metrics[*d.Metric]; !exists { - metrics[*d.Metric] = new(struct { - ipv4 bool - ipv6 bool - }) - } - if metrics[*d.Metric].ipv4 { - return ErrConflictingMetrics - } - metrics[*d.Metric].ipv4 = true - } - if d.Metric6 != nil { - if _, exists := metrics[*d.Metric6]; !exists { - metrics[*d.Metric6] = new(struct { - ipv4 bool - ipv6 bool - }) + if !d.DHCP4 || !d.DHCP6 { + is6, 
err = validIPAddress(c.IPAddress) + if err != nil { + return err + } + if c.Gateway == "" /*&& i == 0*/ { + return ErrMissingGateway + } } - if metrics[*d.Metric6].ipv6 { - return ErrConflictingMetrics + if c.Metric != nil { + if _, exists := metrics[*c.Metric]; !exists { + metrics[*c.Metric] = new(struct { + ipv4 bool + ipv6 bool + }) + } + if !is6 && metrics[*c.Metric].ipv4 { + return ErrConflictingMetrics + } + if is6 && metrics[*c.Metric].ipv6 { + return ErrConflictingMetrics + } + if !is6 { + metrics[*c.Metric].ipv4 = true + } else { + metrics[*c.Metric].ipv6 = true + } } - metrics[*d.Metric6].ipv6 = true } } return nil @@ -257,16 +254,16 @@ func validRoutes(input []types.RoutingData) error { } // No support for blackhole, etc.pp. Add iff you require this. for _, route := range input { - if route.To != "default" { + if ptr.Deref(route.To, "") != "default" { // An IP address is a valid route (implicit smallest subnet) - _, errPrefix := netip.ParsePrefix(route.To) - _, errAddr := netip.ParseAddr(route.To) + _, errPrefix := netip.ParsePrefix(ptr.Deref(route.To, "")) + _, errAddr := netip.ParseAddr(ptr.Deref(route.To, "")) if errPrefix != nil && errAddr != nil { return ErrMalformedRoute } } - if route.Via != "" { - _, err := netip.ParseAddr(route.Via) + if ptr.Deref(route.Via, "") != "" { + _, err := netip.ParseAddr(ptr.Deref(route.Via, "")) if err != nil { return ErrMalformedRoute } @@ -282,19 +279,19 @@ func validFIBRules(input []types.FIBRuleData, isVrf bool) error { for _, rule := range input { // We only support To/From and we require a table if we're not a vrf - if (rule.To == "" && rule.From == "") || (rule.Table == 0 && !isVrf) { + if (ptr.Deref(rule.To, "") == "" && ptr.Deref(rule.From, "") == "") || (ptr.Deref(rule.Table, 0) == 0 && !isVrf) { return ErrMalformedFIBRule } - if rule.To != "" { - _, errPrefix := netip.ParsePrefix(rule.To) - _, errAddr := netip.ParseAddr(rule.To) + if ptr.Deref(rule.To, "") != "" { + _, errPrefix := 
netip.ParsePrefix(ptr.Deref(rule.To, "")) + _, errAddr := netip.ParseAddr(ptr.Deref(rule.To, "")) if errPrefix != nil && errAddr != nil { return ErrMalformedFIBRule } } - if rule.From != "" { - _, errPrefix := netip.ParsePrefix(rule.From) - _, errAddr := netip.ParseAddr(rule.From) + if ptr.Deref(rule.From, "") != "" { + _, errPrefix := netip.ParsePrefix(ptr.Deref(rule.From, "")) + _, errAddr := netip.ParseAddr(ptr.Deref(rule.From, "")) if errPrefix != nil && errAddr != nil { return ErrMalformedFIBRule } @@ -303,13 +300,13 @@ func validFIBRules(input []types.FIBRuleData, isVrf bool) error { return nil } -func validIPAddress(input string) error { +func validIPAddress(input string) (bool, error) { if input == "" { - return ErrMissingIPAddress + return false, ErrMissingIPAddress } - _, err := netip.ParsePrefix(input) + p, err := netip.ParsePrefix(input) if err != nil { - return ErrMalformedIPAddress + return false, ErrMalformedIPAddress } - return nil + return p.Addr().Is6(), nil } diff --git a/pkg/cloudinit/network_test.go b/pkg/cloudinit/network_test.go index 53819e5e..166d2e0b 100644 --- a/pkg/cloudinit/network_test.go +++ b/pkg/cloudinit/network_test.go @@ -130,20 +130,20 @@ const ( dhcp6: false addresses: - 10.10.10.12/24 - - '2001:db8::1/64' + - 2001:db8::1/64 routes: - to: 0.0.0.0/0 metric: 100 via: 10.10.10.1 - to: '::/0' metric: 100 - via: '2001:db8::1' + via: 2001:db8::1 nameservers: addresses: - '8.8.8.8' - '8.8.4.4'` - expectedValidNetworkConfigIPV6 = `network: + expectedValidNetworkConfigIPv6 = `network: version: 2 renderer: networkd ethernets: @@ -153,11 +153,11 @@ const ( dhcp4: false dhcp6: false addresses: - - '2001:db8::1/64' + - 2001:db8::1/64 routes: - to: '::/0' metric: 100 - via: '2001:db8::1' + via: 2001:db8::1 nameservers: addresses: - '8.8.8.8' @@ -442,9 +442,11 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: 
ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -462,11 +464,13 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, - LinkMTU: ptr.To(uint16(9001)), + LinkMTU: ptr.To(int32(9001)), }, }, }, @@ -484,9 +488,11 @@ func TestNetworkConfig_Render(t *testing.T) { Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", DHCP6: true, - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -505,9 +511,11 @@ func TestNetworkConfig_Render(t *testing.T) { Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", DHCP4: true, - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -526,24 +534,28 @@ func TestNetworkConfig_Render(t *testing.T) { Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", DHCP6: true, - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "92:60:a0:5b:22:c3", - IPAddress: "10.10.11.12/24", - Gateway: "10.10.11.1", - Metric: ptr.To(uint32(200)), + IPConfigs: 
[]types.IPConfig{{ + IPAddress: "10.10.11.12/24", + Gateway: "10.10.11.1", + Metric: ptr.To(int32(200)), + }}, Routes: []types.RoutingData{{ - To: "172.16.24.1/24", - Metric: 50, - Via: "10.10.10.254", + To: ptr.To("172.16.24.1/24"), + Metric: ptr.To(int32(50)), + Via: ptr.To("10.10.10.254"), }, { - To: "2002::/64", - Via: "2001:db8::1", + To: ptr.To("2002::/64"), + Via: ptr.To("2001:db8::1"), }, }, }, @@ -563,22 +575,26 @@ func TestNetworkConfig_Render(t *testing.T) { Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", DHCP6: true, - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { - Type: "ethernet", - Name: "eth1", - IPAddress: "10.10.11.12/24", - Gateway: "10.10.11.1", - Metric: ptr.To(uint32(200)), + Type: "ethernet", + Name: "eth1", + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.11.12/24", + Gateway: "10.10.11.1", + Metric: ptr.To(int32(200)), + }}, MacAddress: "92:60:a0:5b:22:c3", FIBRules: []types.FIBRuleData{{ - To: "0.0.0.0/0", - From: "192.168.178.1/24", - Priority: 999, - Table: 100, + To: ptr.To("0.0.0.0/0"), + From: ptr.To("192.168.178.1/24"), + Priority: ptr.To(int64(999)), + Table: ptr.To(int32(100)), }, }, }, @@ -597,8 +613,10 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -616,9 +634,11 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12", + Gateway: "10.10.10.1", + Metric: 
ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -636,9 +656,11 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.115", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.115", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -656,8 +678,10 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -672,11 +696,13 @@ func TestNetworkConfig_Render(t *testing.T) { args: args{ nics: []types.NetworkConfigData{ { - Type: "ethernet", - Name: "eth0", - IPAddress: "10.10.10.11/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + Type: "ethernet", + Name: "eth0", + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.11/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -694,17 +720,21 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.11/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.11/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "92:60:a0:5b:22:c5", - IPAddress: "10.10.11.11/24", - Gateway: "10.10.11.254", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.11.11/24", + Gateway: "10.10.11.254", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -722,9 +752,11 @@ 
func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, }, }, }, @@ -741,18 +773,22 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "b4:87:18:bf:a3:60", - IPAddress: "196.168.100.124/24", - Gateway: "196.168.100.254", - Metric: ptr.To(uint32(200)), + IPConfigs: []types.IPConfig{{ + IPAddress: "196.168.100.124/24", + Gateway: "196.168.100.254", + Metric: ptr.To(int32(200)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, @@ -777,16 +813,19 @@ func TestNetworkConfig_Render(t *testing.T) { args: args{ nics: []types.NetworkConfigData{ { - Type: "ethernet", - Name: "eth0", - MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - IPV6Address: "2001:db8::1/64", - Gateway6: "2001:db8::1", - Metric6: ptr.To(uint32(100)), - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), - DNSServers: []string{"8.8.8.8", "8.8.4.4"}, + Type: "ethernet", + Name: "eth0", + MacAddress: "92:60:a0:5b:22:c2", + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }, { + IPAddress: "2001:db8::1/64", + Gateway: "2001:db8::1", + Metric: ptr.To(int32(100)), + }}, + DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, }, @@ -795,23 +834,25 @@ func TestNetworkConfig_Render(t *testing.T) { err: nil, }, }, - "ValidNetworkConfigIPV6": { + "ValidNetworkConfigIPv6": { reason: "render valid ipv6 
network-config", args: args{ nics: []types.NetworkConfigData{ { - Type: "ethernet", - Name: "eth0", - MacAddress: "92:60:a0:5b:22:c2", - IPV6Address: "2001:db8::1/64", - Gateway6: "2001:db8::1", - Metric6: ptr.To(uint32(100)), - DNSServers: []string{"8.8.8.8", "8.8.4.4"}, + Type: "ethernet", + Name: "eth0", + MacAddress: "92:60:a0:5b:22:c2", + IPConfigs: []types.IPConfig{{ + IPAddress: "2001:db8::1/64", + Gateway: "2001:db8::1", + Metric: ptr.To(int32(100)), + }}, + DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, }, }, want: want{ - network: expectedValidNetworkConfigIPV6, + network: expectedValidNetworkConfigIPv6, err: nil, }, }, @@ -880,18 +921,22 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "b4:87:18:bf:a3:60", - IPAddress: "196.168.100.124/24", - Gateway: "196.168.100.254", - Metric: ptr.To(uint32(200)), + IPConfigs: []types.IPConfig{{ + IPAddress: "196.168.100.124/24", + Gateway: "196.168.100.254", + Metric: ptr.To(int32(200)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { @@ -900,20 +945,20 @@ func TestNetworkConfig_Render(t *testing.T) { Table: 500, Interfaces: []string{"eth0", "eth1"}, Routes: []types.RoutingData{{ - To: "default", - Via: "192.168.178.1", - Metric: 100, - Table: 100, + To: ptr.To("default"), + Via: ptr.To("192.168.178.1"), + Metric: ptr.To(int32(100)), + Table: ptr.To(int32(100)), }, { - To: "10.10.10.0/24", - Via: "192.168.178.254", - Metric: 100, + To: ptr.To("10.10.10.0/24"), + Via: ptr.To("192.168.178.254"), + Metric: ptr.To(int32(100)), }}, FIBRules: []types.FIBRuleData{{ - To: "0.0.0.0/0", - From: "192.168.178.1/24", - Priority: 999, - Table: 100, + To: 
ptr.To("0.0.0.0/0"), + From: ptr.To("192.168.178.1/24"), + Priority: ptr.To(int64(999)), + Table: ptr.To(int32(100)), }}, }, }, @@ -931,52 +976,56 @@ func TestNetworkConfig_Render(t *testing.T) { Type: "ethernet", Name: "eth0", MacAddress: "92:60:a0:5b:22:c2", - IPAddress: "10.10.10.12/24", - Gateway: "10.10.10.1", - Metric: ptr.To(uint32(100)), + IPConfigs: []types.IPConfig{{ + IPAddress: "10.10.10.12/24", + Gateway: "10.10.10.1", + Metric: ptr.To(int32(100)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "b4:87:18:bf:a3:60", - IPAddress: "196.168.100.124/24", - Gateway: "196.168.100.254", - Metric: ptr.To(uint32(200)), + IPConfigs: []types.IPConfig{{ + IPAddress: "196.168.100.124/24", + Gateway: "196.168.100.254", + Metric: ptr.To(int32(200)), + }}, DNSServers: []string{"8.8.8.8", "8.8.4.4"}, }, { Type: "vrf", Name: "vrf-blue", - Table: 500, + Table: int32(500), Interfaces: []string{"eth0"}, Routes: []types.RoutingData{{ - To: "default", - Via: "192.168.178.1", - Metric: 100, - Table: 100, + To: ptr.To("default"), + Via: ptr.To("192.168.178.1"), + Metric: ptr.To(int32(100)), + Table: ptr.To(int32(100)), }, { - To: "10.10.10.0/24", - Via: "192.168.178.254", - Metric: 100, + To: ptr.To("10.10.10.0/24"), + Via: ptr.To("192.168.178.254"), + Metric: ptr.To(int32(100)), }}, FIBRules: []types.FIBRuleData{{ - To: "0.0.0.0/0", - From: "192.168.178.1/24", - Priority: 999, - Table: 100, + To: ptr.To("0.0.0.0/0"), + From: ptr.To("192.168.178.1/24"), + Priority: ptr.To(int64(999)), + Table: ptr.To(int32(100)), }}, }, { Type: "vrf", Name: "vrf-red", - Table: 501, + Table: int32(501), Interfaces: []string{"eth1"}, FIBRules: []types.FIBRuleData{{ - To: "0.0.0.0/0", - From: "192.168.100.0/24", - Priority: 999, - Table: 101, + To: ptr.To("0.0.0.0/0"), + From: ptr.To("192.168.100.0/24"), + Priority: ptr.To(int64(999)), + Table: ptr.To(int32(101)), }}, }, }, @@ -993,9 +1042,9 @@ func TestNetworkConfig_Render(t *testing.T) { { 
Type: "vrf", Name: "vrf-blue", - Table: 500, + Table: int32(500), FIBRules: []types.FIBRuleData{{ - From: "10.10.0.0/16", + From: ptr.To("10.10.0.0/16"), }}, }, }, @@ -1012,10 +1061,10 @@ func TestNetworkConfig_Render(t *testing.T) { { Type: "vrf", Name: "vrf-blue", - Table: 500, + Table: int32(500), Interfaces: []string{"eth0", "eth1"}, Routes: []types.RoutingData{{ - Table: 100, + Table: ptr.To(int32(100)), }}, }, }, diff --git a/pkg/cloudinit/render.go b/pkg/cloudinit/render.go index f7c24a40..79f6f008 100644 --- a/pkg/cloudinit/render.go +++ b/pkg/cloudinit/render.go @@ -18,13 +18,19 @@ package cloudinit import ( "bytes" + "net/netip" "text/template" "github.com/pkg/errors" ) +func is6(addr string) bool { + return netip.MustParsePrefix(addr).Addr().Is6() +} + func render(name string, tpl string, data BaseCloudInitData) ([]byte, error) { - mt, err := template.New(name).Parse(tpl) + f := map[string]any{"is6": is6} + mt, err := template.New(name).Funcs(f).Parse(tpl) if err != nil { return nil, errors.Wrapf(err, "failed to parse %s template", name) } diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go new file mode 100644 index 00000000..d4f46b14 --- /dev/null +++ b/pkg/consts/consts.go @@ -0,0 +1,52 @@ +/* +Copyright 2025 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package consts contains global consts. 
+package consts + +import ( + "reflect" + + "k8s.io/utils/ptr" + ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2" +) + +// GetGlobalInClusterIPPoolKind returns the kind string of a GlobalInClusterIPPool, +// which is useful for typedlocalobjectreferences. +func GetGlobalInClusterIPPoolKind() string { + return reflect.ValueOf(ipamicv1.GlobalInClusterIPPool{}).Type().Name() +} + +// GetGlobalInClusterIPPoolKind returns the kind string of a InClusterIPPool, +// which is useful for typedlocalobjectreferences. +func GetInClusterIPPoolKind() string { + return reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name() +} + +func GetIpamInClusterAPIGroup() *string { + return ptr.To(ipamicv1.GroupVersion.String()) +} + +func GetIpamInClusterAPIVersion() string { + return ipamicv1.GroupVersion.Group +} + +const ( + // GlobalInClusterIPPool is the Global In-Cluster Pool. + GlobalInClusterIPPool = "GlobalInClusterIPPool" + // InClusterIPPool is the In-Cluster Pool. + InClusterIPPool = "InClusterIPPool" +) diff --git a/pkg/ignition/enrich.go b/pkg/ignition/enrich.go index d6eaef9e..f0eefc94 100644 --- a/pkg/ignition/enrich.go +++ b/pkg/ignition/enrich.go @@ -25,6 +25,7 @@ import ( ignition "github.com/flatcar/ignition/config/v2_3" ignitionTypes "github.com/flatcar/ignition/config/v2_3/types" "github.com/pkg/errors" + "inet.af/netaddr" "k8s.io/utils/ptr" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/types" @@ -113,11 +114,22 @@ func (e *Enricher) getEnrichConfig() (*ignitionTypes.Config, error) { func (e *Enricher) getProxmoxEnvContent() string { content := fmt.Sprintf("COREOS_CUSTOM_HOSTNAME=%s\nCOREOS_CUSTOM_INSTANCE_ID=%s\nCOREOS_CUSTOM_PROVIDER_ID=%s", e.Hostname, e.InstanceID, e.ProviderID) - if len(e.Network) > 0 && e.Network[0].IPAddress != "" { - content += fmt.Sprintf("\nCOREOS_CUSTOM_PRIVATE_IPV4=%s", e.Network[0].IPAddress) - } - if len(e.Network) > 0 && e.Network[0].IPV6Address != "" { - content += 
fmt.Sprintf("\nCOREOS_CUSTOM_PRIVATE_IPV6=%s", e.Network[0].IPV6Address) + // TODO: consider adding a kube-vip config field to NetworkConfigData + // TODO: consider having an ip address type (maybe just inet.af/netaddr.IP) instead of string + var seen4, seen6 bool + if len(e.Network) > 0 { + for _, ipconfig := range e.Network[0].IPConfigs { + if ip, err := netaddr.ParseIP(ipconfig.IPAddress); err != nil { + if !seen4 && ip.Is4() { + content += fmt.Sprintf("\nCOREOS_CUSTOM_PRIVATE_IPV4=%s", ipconfig.IPAddress) + seen4 = true + } + if !seen6 && ip.Is6() { + content += fmt.Sprintf("\nCOREOS_CUSTOM_PRIVATE_IPV6=%s", ipconfig.IPAddress) + seen6 = true + } + } + } } return url.PathEscape(content) } diff --git a/pkg/ignition/enrich_test.go b/pkg/ignition/enrich_test.go index 9c09d664..6aae8699 100644 --- a/pkg/ignition/enrich_test.go +++ b/pkg/ignition/enrich_test.go @@ -60,12 +60,12 @@ func TestEnricher_Enrich(t *testing.T) { ProviderID: "proxmox://xxxx-xxx", Network: []types.NetworkConfigData{ { - Name: "eth0", - IPAddress: "10.1.1.9/24", - IPV6Address: "2001:db8::1/64", - Gateway6: "2001:db8::1", - Gateway: "10.1.1.1", - DNSServers: []string{"10.1.1.1"}, + Name: "eth0", + IPConfigs: []types.IPConfig{ + {IPAddress: "10.1.1.9/24", Gateway: "10.1.1.1"}, + {IPAddress: "2001:db8::1/64", Gateway: "2001:db8::1"}, + }, + DNSServers: []string{"10.1.1.1"}, }, }, } diff --git a/pkg/ignition/network.go b/pkg/ignition/network.go index 00bf4747..e3ff5ae0 100644 --- a/pkg/ignition/network.go +++ b/pkg/ignition/network.go @@ -3,6 +3,7 @@ package ignition import ( "bytes" "fmt" + "net/netip" "text/template" "github.com/pkg/errors" @@ -14,19 +15,17 @@ const ( networkTypeEthernet = "ethernet" networkTypeVRF = "vrf" - // networkConfigTPlNetworkd is a Go template to generate systemd-networkd unit files + // networkConfigTplNetworkd is a Go template to generate systemd-networkd unit files // based on the data schema provided for network-config v2. 
- networkConfigTPlNetworkd = `{{- $element := . -}} + networkConfigTplNetworkd = `{{- $element := . -}} {{- $type := $element.Type -}} {{ if eq $type "ethernet" -}} [Match] MACAddress={{ $element.MacAddress }} - {{- if .LinkMTU }} [Link] MTUBytes={{ .LinkMTU }} {{- end }} - [Network] {{- if .VRF }} VRF={{ .VRF }} @@ -38,49 +37,34 @@ DHCP=ipv4 {{- else if $element.DHCP6 }} DHCP=ipv6 {{- end }} - {{- template "dns" . }} - -{{- if $element.IPAddress }} -[Address] -Address={{ $element.IPAddress }} -{{- end }} - -{{- if $element.IPV6Address }} +{{- range $ipconfig := $element.IPConfigs -}} +{{ if .IPAddress }} [Address] -Address={{ $element.IPV6Address }} -{{- end }} - -{{ if or .Gateway .Gateway6 }} -{{- if .Gateway -}} +Address={{ .IPAddress }} +{{- end -}} +{{- if .Gateway }} [Route] +{{- if is6 .IPAddress }} +Destination=::/0 +{{- else }} Destination=0.0.0.0/0 +{{- end }} Gateway={{ .Gateway }} {{- if .Metric }} Metric={{ .Metric }} {{- end }} {{- end }} - -{{- if .Gateway6 }} -[Route] -Destination=::/0 -Gateway={{ .Gateway6 }} -{{- if .Metric6 }} -Metric={{ .Metric6 }} -{{- end }} -{{- end }} {{- end }} {{ template "routes" . -}} {{ template "rules" . -}} {{- end -}} - {{- if eq $type "vrf" -}} [Match] Name={{ $element.Name }} {{ template "routes" . -}} {{- template "rules" . 
-}} {{- end -}} - {{- define "dns" }} {{- if .DNSServers }} {{- range $dnsServer := .DNSServers }} @@ -88,12 +72,11 @@ DNS={{ $dnsServer }} {{- end }} {{- end }} {{- end }} - {{- define "rules" }} -{{- if .FIBRules }} +{{- if .FIBRules -}} {{- $type := .Type }} {{- $table := .Table }} -{{- range $index, $rule := .FIBRules }} +{{- range $index, $rule := .FIBRules -}} [RoutingPolicyRule] {{ if $rule.To }}To={{$rule.To}}{{- end }} {{ if $rule.From }}From={{$rule.From}}{{- end }} @@ -105,10 +88,9 @@ DNS={{ $dnsServer }} {{- end -}} {{- end -}} {{- end -}} - {{- define "routes" }} {{- if .Routes }} -{{- range $index, $route := .Routes }} +{{- range $index, $route := .Routes -}} [Route] {{ if $route.To }}Destination={{$route.To}}{{- end }} {{ if $route.Via }}Gateway={{$route.Via}}{{- end }} @@ -124,7 +106,6 @@ DNS={{ $dnsServer }} [NetDev] Name={{ $element.Name }} Kind={{ $element.Type }} - [VRF] Table={{ $element.Table }} {{- end }} @@ -158,7 +139,7 @@ func RenderNetworkConfigData(data []types.NetworkConfigData) (map[string][]byte, } for i, networkConfig := range data { - config, err := render(fmt.Sprintf("%d-%s", i, networkConfig.Type), networkConfigTPlNetworkd, networkConfig) + config, err := render(fmt.Sprintf("%d-%s", i, networkConfig.Type), networkConfigTplNetworkd, networkConfig) if err != nil { return nil, err } @@ -177,8 +158,12 @@ func RenderNetworkConfigData(data []types.NetworkConfigData) (map[string][]byte, return configs, nil } +func is6(addr string) bool { + return netip.MustParsePrefix(addr).Addr().Is6() +} + func render(name string, tpl string, data types.NetworkConfigData) ([]byte, error) { - mt, err := template.New(name).Parse(tpl) + mt, err := template.New(name).Funcs(map[string]any{"is6": is6}).Parse(tpl) if err != nil { return nil, errors.Wrapf(err, "failed to parse %s template", name) } diff --git a/pkg/ignition/network_test.go b/pkg/ignition/network_test.go index ed96c581..3f5435e7 100644 --- a/pkg/ignition/network_test.go +++ 
b/pkg/ignition/network_test.go @@ -13,35 +13,31 @@ var ( expectedValidNetworkConfig = map[string][]byte{ "00-eth0.network": []byte(`[Match] MACAddress=E2:B8:FE:E7:50:75 - [Network] DNS=10.0.1.1 [Address] Address=10.0.0.98/25 -[Address] -Address=2001:db8:1::10/64 - [Route] Destination=0.0.0.0/0 Gateway=10.0.0.1 Metric=100 +[Address] +Address=2001:db8:1::10/64 [Route] Destination=::/0 Gateway=2001:db8:1::1 +Metric=100 `), "01-eth1.network": []byte(`[Match] MACAddress=E2:8E:95:1F:EB:36 - [Network] DNS=10.0.1.1 [Address] Address=10.0.1.84/25 - [Route] Destination=0.0.0.0/0 Gateway=10.0.1.1 Metric=200 - [RoutingPolicyRule] To=8.7.6.5/32 From=1.1.1.1/32 @@ -54,37 +50,33 @@ Table=500 "00-vrf0.netdev": []byte(`[NetDev] Name=vrf0 Kind=vrf - [VRF] Table=644 `), "00-eth0.network": []byte(`[Match] MACAddress=E2:B8:FE:E7:50:75 - [Network] DNS=10.0.1.1 [Address] Address=10.0.0.98/25 -[Address] -Address=2001:db8:1::10/64 - [Route] Destination=0.0.0.0/0 Gateway=10.0.0.1 Metric=100 +[Address] +Address=2001:db8:1::10/64 [Route] Destination=::/0 Gateway=2001:db8:1::1 +Metric=100 `), "01-eth1.network": []byte(`[Match] MACAddress=E2:8E:95:1F:EB:36 - [Network] VRF=vrf0 DNS=10.0.1.1 [Address] Address=10.0.1.84/25 - [Route] Destination=0.0.0.0/0 Gateway=10.0.1.1 @@ -93,12 +85,10 @@ Metric=200 "02-vrf2.network": []byte(`[Match] Name=vrf0 - [Route] Destination=3.4.5.6 Gateway=10.0.1.1 Metric=100 - [RoutingPolicyRule] To=8.7.6.5/32 From=1.1.1.1/32 @@ -128,31 +118,30 @@ func TestRenderNetworkConfigData(t *testing.T) { args: args{ nics: []types.NetworkConfigData{ { - Type: "ethernet", - Name: "eth0", - MacAddress: "E2:B8:FE:E7:50:75", - IPAddress: "10.0.0.98/25", - Gateway: "10.0.0.1", - ProxName: "net0", - DNSServers: []string{"10.0.1.1"}, - Metric: ptr.To(uint32(100)), - IPV6Address: "2001:db8:1::10/64", - Gateway6: "2001:db8:1::1", + Type: "ethernet", + Name: "eth0", + MacAddress: "E2:B8:FE:E7:50:75", + IPConfigs: []types.IPConfig{ + {IPAddress: "10.0.0.98/25", Gateway: "10.0.0.1", Metric: 
ptr.To(int32(100))}, + {IPAddress: "2001:db8:1::10/64", Gateway: "2001:db8:1::1", Metric: ptr.To(int32(100))}, + }, + ProxName: ptr.To("net0"), + DNSServers: []string{"10.0.1.1"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "E2:8E:95:1F:EB:36", - IPAddress: "10.0.1.84/25", - Gateway: "10.0.1.1", - ProxName: "net1", + IPConfigs: []types.IPConfig{ + {IPAddress: "10.0.1.84/25", Gateway: "10.0.1.1", Metric: ptr.To(int32(200))}, + }, + ProxName: ptr.To("net1"), DNSServers: []string{"10.0.1.1"}, - Metric: ptr.To(uint32(200)), FIBRules: []types.FIBRuleData{{ - To: "8.7.6.5/32", - From: "1.1.1.1/32", - Priority: 100, - Table: 500, + To: ptr.To("8.7.6.5/32"), + From: ptr.To("1.1.1.1/32"), + Priority: ptr.To(int64(100)), + Table: ptr.To(int32(500)), }}, }, }, @@ -167,42 +156,41 @@ func TestRenderNetworkConfigData(t *testing.T) { args: args{ nics: []types.NetworkConfigData{ { - Type: "ethernet", - Name: "eth0", - MacAddress: "E2:B8:FE:E7:50:75", - IPAddress: "10.0.0.98/25", - Gateway: "10.0.0.1", - ProxName: "net0", - DNSServers: []string{"10.0.1.1"}, - Metric: ptr.To(uint32(100)), - IPV6Address: "2001:db8:1::10/64", - Gateway6: "2001:db8:1::1", + Type: "ethernet", + Name: "eth0", + MacAddress: "E2:B8:FE:E7:50:75", + IPConfigs: []types.IPConfig{ + {IPAddress: "10.0.0.98/25", Gateway: "10.0.0.1", Metric: ptr.To(int32(100))}, + {IPAddress: "2001:db8:1::10/64", Gateway: "2001:db8:1::1", Metric: ptr.To(int32(100))}, + }, + ProxName: ptr.To("net0"), + DNSServers: []string{"10.0.1.1"}, }, { Type: "ethernet", Name: "eth1", MacAddress: "E2:8E:95:1F:EB:36", - IPAddress: "10.0.1.84/25", - Gateway: "10.0.1.1", - ProxName: "net1", + IPConfigs: []types.IPConfig{ + {IPAddress: "10.0.1.84/25", Gateway: "10.0.1.1", Metric: ptr.To(int32(200))}, + }, + ProxName: ptr.To("net1"), DNSServers: []string{"10.0.1.1"}, - Metric: ptr.To(uint32(200)), }, { Type: "vrf", Name: "vrf0", - ProxName: "net1", + ProxName: ptr.To("net1"), Table: 644, Interfaces: []string{"eth1"}, Routes: 
[]types.RoutingData{{ - To: "3.4.5.6", - Via: "10.0.1.1", - Metric: 100, + To: ptr.To("3.4.5.6"), + Via: ptr.To("10.0.1.1"), + Metric: ptr.To(int32(100)), }}, FIBRules: []types.FIBRuleData{{ - To: "8.7.6.5/32", - From: "1.1.1.1/32", - Priority: 100, + To: ptr.To("8.7.6.5/32"), + From: ptr.To("1.1.1.1/32"), + Priority: ptr.To(int64(100)), }}, }, }, diff --git a/pkg/kubernetes/ipam/ipam.go b/pkg/kubernetes/ipam/ipam.go index 8ca564c7..f025c36a 100644 --- a/pkg/kubernetes/ipam/ipam.go +++ b/pkg/kubernetes/ipam/ipam.go @@ -21,10 +21,16 @@ package ipam import ( "context" "fmt" + "net/netip" + "reflect" + "regexp" + "slices" + "strings" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/utils/ptr" @@ -35,7 +41,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" + . "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" ) // Helper provides handling of ipam objects such as, InClusterPool, IPAddressClaim. @@ -58,6 +65,37 @@ func InClusterPoolFormat(cluster *infrav1.ProxmoxCluster, format string) string return fmt.Sprintf("%s-%s-icip", cluster.GetName(), format) } +// GetInClusterPools returns the IPPools belonging to the ProxmoxCluster. 
+func (h *Helper) GetInClusterPools(ctx context.Context, moxm *infrav1.ProxmoxMachine) (map[string]*ipamicv1.InClusterIPPool, error) { + pools := map[string]*ipamicv1.InClusterIPPool{} + namespace := moxm.ObjectMeta.Namespace + + // cluster, _ := util.GetClusterFromMetadata(ctx, h.ctrlClient, machine.ObjectMeta) + clusterName := moxm.ObjectMeta.Labels["cluster.x-k8s.io/cluster-name"] + proxmoxCluster := &infrav1.ProxmoxCluster{} + + h.ctrlClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, proxmoxCluster) + + // TODO: Per ZONE IPPoolRefs + for _, poolRef := range proxmoxCluster.Status.InClusterIPPoolRef { + pool := &ipamicv1.InClusterIPPool{} + h.ctrlClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: poolRef.Name}, pool) + // There's no way of telling if a pool is ipv4 or ipv6 except for parsing it. + // cluster-api-in-cluster-ipam keeps the pool functions to tag a pool ipv4/ipv6 internal, + // so we need to reinvent the wheel here. + re := regexp.MustCompile(`^[^-/]+`) + ipString := re.FindString(pool.Spec.Addresses[0]) + ip, _ := netip.ParseAddr(ipString) + + if ip.Is4() { + pools["ipv4"] = pool + } else if ip.Is6() { + pools["ipv6"] = pool + } + } + return pools, nil +} + // ErrMissingAddresses is returned when the cluster IPAM config does not contain any addresses. 
var ErrMissingAddresses = errors.New("no valid ip addresses defined for the ip pool") @@ -71,8 +109,13 @@ func (h *Helper) CreateOrUpdateInClusterIPPool(ctx context.Context) error { ipv4Config := h.cluster.Spec.IPv4Config v4Pool := &ipamicv1.InClusterIPPool{ + TypeMeta: metav1.TypeMeta{ + APIVersion: ipamicv1.GroupVersion.String(), + // Thank you ipamic for making InClusterIPPoolKind private + Kind: GetInClusterIPPoolKind(), + }, ObjectMeta: metav1.ObjectMeta{ - Name: InClusterPoolFormat(h.cluster, infrav1.IPV4Format), + Name: InClusterPoolFormat(h.cluster, infrav1.IPv4Format), Namespace: h.cluster.GetNamespace(), Annotations: func() map[string]string { if ipv4Config.Metric != nil { @@ -83,7 +126,7 @@ func (h *Helper) CreateOrUpdateInClusterIPPool(ctx context.Context) error { }, Spec: ipamicv1.InClusterIPPoolSpec{ Addresses: ipv4Config.Addresses, - Prefix: ipv4Config.Prefix, + Prefix: int(ipv4Config.Prefix), Gateway: ipv4Config.Gateway, }, } @@ -113,8 +156,13 @@ func (h *Helper) CreateOrUpdateInClusterIPPool(ctx context.Context) error { // ipv6 if h.cluster.Spec.IPv6Config != nil { v6Pool := &ipamicv1.InClusterIPPool{ + TypeMeta: metav1.TypeMeta{ + APIVersion: ipamicv1.GroupVersion.String(), + // Thank you ipamic for making InClusterIPPoolKind private + Kind: reflect.ValueOf(ipamicv1.InClusterIPPool{}).Type().Name(), + }, ObjectMeta: metav1.ObjectMeta{ - Name: InClusterPoolFormat(h.cluster, infrav1.IPV6Format), + Name: InClusterPoolFormat(h.cluster, infrav1.IPv6Format), Namespace: h.cluster.GetNamespace(), Annotations: func() map[string]string { if h.cluster.Spec.IPv6Config.Metric != nil { @@ -125,7 +173,7 @@ func (h *Helper) CreateOrUpdateInClusterIPPool(ctx context.Context) error { }, Spec: ipamicv1.InClusterIPPoolSpec{ Addresses: h.cluster.Spec.IPv6Config.Addresses, - Prefix: h.cluster.Spec.IPv6Config.Prefix, + Prefix: int(h.cluster.Spec.IPv6Config.Prefix), Gateway: h.cluster.Spec.IPv6Config.Gateway, }, } @@ -199,18 +247,18 @@ func (h *Helper) 
GetIPPoolAnnotations(ctx context.Context, ipAddress *ipamv1.IPA Name: poolRef.Name, } - if poolRef.Kind == "InClusterIPPool" { + if poolRef.Kind == InClusterIPPool { ipPool, err := h.GetInClusterIPPool(ctx, key) annotations = ipPool.ObjectMeta.Annotations if err != nil { return nil, err } - } else if poolRef.Kind == "GlobalInClusterIPPool" { + } else if poolRef.Kind == GlobalInClusterIPPool { ipPool, err := h.GetGlobalInClusterIPPool(ctx, key) - annotations = ipPool.ObjectMeta.Annotations if err != nil { return nil, err } + annotations = ipPool.ObjectMeta.Annotations } // If neither of these kinds are matched, this is a test case, // therefore no action is to be taken. @@ -219,6 +267,7 @@ func (h *Helper) GetIPPoolAnnotations(ctx context.Context, ipAddress *ipamv1.IPA } // CreateIPAddressClaim creates an IPAddressClaim for a given object. +// TODO: remove. func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, device, format, clusterNameLabel string, ref *corev1.TypedLocalObjectReference) error { var gvk schema.GroupVersionKind key := client.ObjectKey{ @@ -226,9 +275,6 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, Name: owner.GetName(), } suffix := infrav1.DefaultSuffix - if format == infrav1.IPV6Format { - suffix += "6" - } switch { case device == infrav1.DefaultNetworkDevice && ref == nil: @@ -241,7 +287,7 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, if err != nil { return err } - case ref.Kind == "InClusterIPPool": + case ref.Kind == InClusterIPPool: pool, err := h.GetInClusterIPPool(ctx, ref) if err != nil { return errors.Wrapf(err, "unable to find inclusterpool for cluster %s", h.cluster.Name) @@ -251,7 +297,7 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, if err != nil { return err } - case ref.Kind == "GlobalInClusterIPPool": + case ref.Kind == GlobalInClusterIPPool: pool, err := h.GetGlobalInClusterIPPool(ctx, ref) if err 
!= nil { return errors.Wrapf(err, "unable to find global inclusterpool for cluster %s", h.cluster.Name) @@ -293,6 +339,69 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, return err } +// CreateIPAddressClaimV2 creates an IPAddressClaim for a given object. +func (h *Helper) CreateIPAddressClaimV2(ctx context.Context, owner client.Object, device string, poolNum int, clusterNameLabel string, ref *corev1.TypedLocalObjectReference) error { + var gvk schema.GroupVersionKind + key := client.ObjectKey{ + Namespace: owner.GetNamespace(), + Name: owner.GetName(), + } + suffix := infrav1.DefaultSuffix + + switch { + case ref.Kind == InClusterIPPool: + pool, err := h.GetInClusterIPPool(ctx, ref) + if err != nil { + return errors.Wrapf(err, "unable to find inclusterpool for cluster %s", h.cluster.Name) + } + key.Name = pool.GetName() + gvk, err = gvkForObject(pool, h.ctrlClient.Scheme()) + if err != nil { + return err + } + case ref.Kind == GlobalInClusterIPPool: + pool, err := h.GetGlobalInClusterIPPool(ctx, ref) + if err != nil { + return errors.Wrapf(err, "unable to find global inclusterpool for cluster %s", h.cluster.Name) + } + key.Name = pool.GetName() + gvk, err = gvkForObject(pool, h.ctrlClient.Scheme()) + if err != nil { + return err + } + default: + return errors.Errorf("unsupported pool type %s", ref.Kind) + } + + // Ensures that the claim has a reference to the cluster of the VM to + // support pausing reconciliation. 
+ labels := map[string]string{ + clusterv1.ClusterNameLabel: clusterNameLabel, + } + + // TODO: suffix makes no sense, fmt.Sprintf() needs to be shared with testing + desired := &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%02d-%s", owner.GetName(), device, poolNum, suffix), + Namespace: owner.GetNamespace(), + Labels: labels, + }, + Spec: ipamv1.IPAddressClaimSpec{ + PoolRef: corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(gvk.Group), + Kind: gvk.Kind, + Name: key.Name, + }, + }, + } + _, err := controllerutil.CreateOrUpdate(ctx, h.ctrlClient, desired, func() error { + // set the owner reference to the cluster + return controllerutil.SetControllerReference(owner, desired, h.ctrlClient.Scheme()) + }) + + return err +} + // GetIPAddress attempts to retrieve the IPAddress. func (h *Helper) GetIPAddress(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddress, error) { out := &ipamv1.IPAddress{} @@ -304,6 +413,73 @@ func (h *Helper) GetIPAddress(ctx context.Context, key client.ObjectKey) (*ipamv return out, nil } +func (h *Helper) GetIPAddressV2(ctx context.Context, poolRef corev1.TypedLocalObjectReference, moxm *infrav1.ProxmoxMachine) ([]ipamv1.IPAddress, error) { + ipAddresses, err := h.GetIPAddressByPool(ctx, poolRef) + + out := make([]ipamv1.IPAddress, 0) + // fieldSelector, err := fields.ParseSelector("spec.poolRef.name=" + poolRef.Name + ",spec.poolRef.kind=" + poolRef.Kind) + // fieldSelector, err := fields.ParseSelector("metadata" + poolRef.Name) + + if err != nil { + return nil, err + } + for _, addr := range ipAddresses { + key := client.ObjectKey{ + Name: addr.Name, + Namespace: addr.Namespace, + } + + // Get the parent to find the owner machine + ipAddressClaim := &ipamv1.IPAddressClaim{} + err := h.ctrlClient.Get(ctx, key, ipAddressClaim) + if err != nil { + return nil, err + } + + // check if current moxm is in the owner reference + isOwner, err := 
controllerutil.HasOwnerReference(ipAddressClaim.OwnerReferences, moxm, h.ctrlClient.Scheme()) + if err != nil { + return nil, err + } + + if isOwner { + out = append(out, addr) + } + } + + return out, nil +} + +// GetIPAddressByPool attempts to retrieve all IPAddresses belonging to a pool. +func (h *Helper) GetIPAddressByPool(ctx context.Context, poolRef corev1.TypedLocalObjectReference) ([]ipamv1.IPAddress, error) { + addresses := &ipamv1.IPAddressList{} + + fieldSelector, err := fields.ParseSelector("spec.poolRef.name=" + poolRef.Name) + if err != nil { + return nil, err + } + + listOptions := client.ListOptions{FieldSelector: fieldSelector} + err = h.ctrlClient.List(ctx, addresses, &listOptions) + + if err != nil { + return nil, err + } + + addresses.Items = slices.DeleteFunc(addresses.Items, func(n ipamv1.IPAddress) bool { + // Check if we are actually dealing with the right resource kind. + groupVersion, _ := schema.ParseGroupVersion(n.APIVersion) + return groupVersion.Group != GetIpamInClusterAPIVersion() + }) + + // Sort result by IPAddress.Name to provide stability to testing. + slices.SortFunc(addresses.Items, func(a, b ipamv1.IPAddress) int { + return strings.Compare(a.Name, b.Name) + }) + + return addresses.Items, nil +} + func gvkForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { gvk, err := apiutil.GVKForObject(obj, scheme) if err != nil { diff --git a/pkg/kubernetes/ipam/ipam_test.go b/pkg/kubernetes/ipam/ipam_test.go index 18a551b9..4f89ab00 100644 --- a/pkg/kubernetes/ipam/ipam_test.go +++ b/pkg/kubernetes/ipam/ipam_test.go @@ -36,7 +36,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" + . 
"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/consts" ) type IPAMTestSuite struct { @@ -91,7 +92,7 @@ func (s *IPAMTestSuite) Test_CreateOrUpdateInClusterIPPool() { s.Equal(pool.Spec.Prefix, 24) s.cluster.Spec.IPv4Config.Gateway = "10.11.0.0" - s.cluster.Spec.IPv4Config.Metric = ptr.To(uint32(123)) + s.cluster.Spec.IPv4Config.Metric = ptr.To(int32(123)) ipamConfig = s.cluster.Spec.IPv4Config @@ -120,7 +121,7 @@ func (s *IPAMTestSuite) Test_CreateOrUpdateInClusterIPPool() { Addresses: []string{"2001:db8::/64"}, Prefix: 64, Gateway: "2001:db8::1", - Metric: ptr.To(uint32(123)), + Metric: ptr.To(int32(123)), } s.NoError(s.helper.CreateOrUpdateInClusterIPPool(s.ctx)) @@ -136,7 +137,7 @@ func (s *IPAMTestSuite) Test_CreateOrUpdateInClusterIPPool() { } func (s *IPAMTestSuite) Test_GetDefaultInClusterIPPool() { - notFound, err := s.helper.GetDefaultInClusterIPPool(s.ctx, infrav1.IPV4Format) + notFound, err := s.helper.GetDefaultInClusterIPPool(s.ctx, infrav1.IPv4Format) s.Nil(notFound) s.Error(err) s.True(apierrors.IsNotFound(err)) @@ -150,7 +151,7 @@ func (s *IPAMTestSuite) Test_GetDefaultInClusterIPPool() { Name: "test-cluster-v4-icip", }, &pool)) - found, err := s.helper.GetDefaultInClusterIPPool(s.ctx, infrav1.IPV4Format) + found, err := s.helper.GetDefaultInClusterIPPool(s.ctx, infrav1.IPv4Format) s.NoError(err) s.Equal(&pool, found) @@ -170,7 +171,7 @@ func (s *IPAMTestSuite) Test_GetDefaultInClusterIPPool() { Name: "test-cluster-v6-icip", }, &poolV6)) - foundV6, err := s.helper.GetDefaultInClusterIPPool(s.ctx, infrav1.IPV6Format) + foundV6, err := s.helper.GetDefaultInClusterIPPool(s.ctx, infrav1.IPv6Format) s.NoError(err) s.Equal(&poolV6, foundV6) } @@ -179,7 +180,7 @@ func (s *IPAMTestSuite) Test_GetInClusterIPPool() { notFound, err := s.helper.GetInClusterIPPool(s.ctx, &corev1.TypedLocalObjectReference{ Name: "simple-pool", APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "InClusterIPPool", + Kind: InClusterIPPool, }) s.Nil(notFound) s.Error(err) @@ 
-197,7 +198,7 @@ func (s *IPAMTestSuite) Test_GetInClusterIPPool() { found, err := s.helper.GetInClusterIPPool(s.ctx, &corev1.TypedLocalObjectReference{ APIGroup: ptr.To("ipam.cluster.x-k8s.io"), Name: "test-cluster-v4-icip", - Kind: "InClusterIPPool"}) + Kind: InClusterIPPool}) s.NoError(err) s.Equal(&pool, found) } @@ -206,7 +207,7 @@ func (s *IPAMTestSuite) Test_GetGlobalInClusterIPPool() { notFound, err := s.helper.GetGlobalInClusterIPPool(s.ctx, &corev1.TypedLocalObjectReference{ Name: "simple-global-pool", APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool"}) + Kind: GlobalInClusterIPPool}) s.Nil(notFound) s.Error(err) s.True(apierrors.IsNotFound(err)) @@ -231,7 +232,7 @@ func (s *IPAMTestSuite) Test_GetGlobalInClusterIPPool() { found, err := s.helper.GetGlobalInClusterIPPool(s.ctx, &corev1.TypedLocalObjectReference{ Name: "test-global-cluster-icip", APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "GlobalInClusterIPPool"}) + Kind: GlobalInClusterIPPool}) s.NoError(err) s.Equal(&pool, found) @@ -246,7 +247,7 @@ func (s *IPAMTestSuite) Test_GetIPPoolAnnotations() { Name: "test-cluster-v4-icip", }, &pool)) - err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", nil) + err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPv4Format, "test-cluster", nil) s.NoError(err) // create a dummy IPAddress. 
@@ -282,9 +283,9 @@ func (s *IPAMTestSuite) Test_GetIPPoolAnnotations() { Name: "test-ippool-annotations", }, &globalPool)) - err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{ + err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPv4Format, "test-cluster", &corev1.TypedLocalObjectReference{ Name: "test-ippool-annotations", - Kind: "GlobalInClusterIPPool", + Kind: GlobalInClusterIPPool, APIGroup: ptr.To("ipam.cluster.x-k8s.io"), }) s.NoError(err) @@ -334,7 +335,7 @@ func (s *IPAMTestSuite) Test_CreateIPAddressClaim() { device := "net0" rootClusterName := "test" - err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), device, infrav1.IPV4Format, rootClusterName, nil) + err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), device, infrav1.IPv4Format, rootClusterName, nil) s.NoError(err) // Ensure cluster label is set. @@ -367,9 +368,9 @@ func (s *IPAMTestSuite) Test_CreateIPAddressClaim() { additionalDevice := "net1" - err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), additionalDevice, infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{ + err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), additionalDevice, infrav1.IPv4Format, "test-cluster", &corev1.TypedLocalObjectReference{ Name: "test-additional-cluster-icip", - Kind: "InClusterIPPool", + Kind: InClusterIPPool, APIGroup: ptr.To("ipam.cluster.x-k8s.io"), }) s.NoError(err) @@ -393,14 +394,14 @@ func (s *IPAMTestSuite) Test_CreateIPAddressClaim() { globalDevice := "net2" - err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), globalDevice, infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{ + err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), globalDevice, infrav1.IPv4Format, "test-cluster", &corev1.TypedLocalObjectReference{ Name: "test-global-cluster-icip", - Kind: "GlobalInClusterIPPool", + Kind: GlobalInClusterIPPool, APIGroup: 
ptr.To("ipam.cluster.x-k8s.io"), }) s.NoError(err) - // IPV6. + // IPv6. s.cluster.Spec.IPv6Config = &infrav1.IPConfigSpec{ Addresses: []string{"2001:db8::/64"}, Prefix: 64, @@ -414,7 +415,7 @@ func (s *IPAMTestSuite) Test_CreateIPAddressClaim() { Name: "test-cluster-v6-icip", }, &poolV6)) - err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), device, infrav1.IPV6Format, "test-cluster", nil) + err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), device, infrav1.IPv6Format, "test-cluster", nil) s.NoError(err) } @@ -427,8 +428,8 @@ func (s *IPAMTestSuite) Test_GetIPAddress() { Name: "test-cluster-v4-icip", }, &pool)) - err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{ - Kind: "InClusterIPPool", + err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPv4Format, "test-cluster", &corev1.TypedLocalObjectReference{ + Kind: InClusterIPPool, Name: "test-cluster-v4-icip", }) s.NoError(err) diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 246bd6b6..21c6d94c 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -40,7 +40,7 @@ type Client interface { GetTask(ctx context.Context, upID string) (*proxmox.Task, error) - GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) + GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment int64) (uint64, error) ResizeDisk(ctx context.Context, vm *proxmox.VirtualMachine, disk, size string) (*proxmox.Task, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index fa38f960..4faebc4a 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -253,7 +253,7 @@ func (c *APIClient) GetTask(ctx context.Context, upID string) (*proxmox.Task, er } // GetReservableMemoryBytes returns the memory that can be reserved by a new VM, in bytes. 
-func (c *APIClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) { +func (c *APIClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment int64) (uint64, error) { node, err := c.Client.Node(ctx, nodeName) if err != nil { return 0, fmt.Errorf("cannot find node with name %s: %w", nodeName, err) diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index 2d7553b7..40759a87 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -54,7 +54,7 @@ func TestProxmoxAPIClient_GetReservableMemoryBytes(t *testing.T) { name string maxMem uint64 // memory size of already provisioned guest expect uint64 // expected available memory of the host - nodeMemoryAdjustment uint64 // factor like 1.0 to multiply host memory with for overprovisioning + nodeMemoryAdjustment int64 // factor like 100 to multiply host memory with for overprovisioning }{ { name: "under zero - no overprovisioning", diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index af6e6f3a..0604a534 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -454,7 +454,7 @@ func (_c *MockClient_FindVMTemplateByTags_Call) RunAndReturn(run func(context.Co } // GetReservableMemoryBytes provides a mock function with given fields: ctx, nodeName, nodeMemoryAdjustment -func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) { +func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment int64) (uint64, error) { ret := _m.Called(ctx, nodeName, nodeMemoryAdjustment) if len(ret) == 0 { @@ -463,16 +463,16 @@ func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName str var r0 uint64 var r1 error - if rf, ok := 
ret.Get(0).(func(context.Context, string, uint64) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, int64) (uint64, error)); ok { return rf(ctx, nodeName, nodeMemoryAdjustment) } - if rf, ok := ret.Get(0).(func(context.Context, string, uint64) uint64); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, int64) uint64); ok { r0 = rf(ctx, nodeName, nodeMemoryAdjustment) } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context, string, uint64) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok { r1 = rf(ctx, nodeName, nodeMemoryAdjustment) } else { r1 = ret.Error(1) @@ -489,14 +489,14 @@ type MockClient_GetReservableMemoryBytes_Call struct { // GetReservableMemoryBytes is a helper method to define mock.On call // - ctx context.Context // - nodeName string -// - nodeMemoryAdjustment uint64 +// - nodeMemoryAdjustment int64 func (_e *MockClient_Expecter) GetReservableMemoryBytes(ctx interface{}, nodeName interface{}, nodeMemoryAdjustment interface{}) *MockClient_GetReservableMemoryBytes_Call { return &MockClient_GetReservableMemoryBytes_Call{Call: _e.mock.On("GetReservableMemoryBytes", ctx, nodeName, nodeMemoryAdjustment)} } -func (_c *MockClient_GetReservableMemoryBytes_Call) Run(run func(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64)) *MockClient_GetReservableMemoryBytes_Call { +func (_c *MockClient_GetReservableMemoryBytes_Call) Run(run func(ctx context.Context, nodeName string, nodeMemoryAdjustment int64)) *MockClient_GetReservableMemoryBytes_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(uint64)) + run(args[0].(context.Context), args[1].(string), args[2].(int64)) }) return _c } @@ -506,7 +506,7 @@ func (_c *MockClient_GetReservableMemoryBytes_Call) Return(_a0 uint64, _a1 error return _c } -func (_c *MockClient_GetReservableMemoryBytes_Call) RunAndReturn(run func(context.Context, 
string, uint64) (uint64, error)) *MockClient_GetReservableMemoryBytes_Call { +func (_c *MockClient_GetReservableMemoryBytes_Call) RunAndReturn(run func(context.Context, string, int64) (uint64, error)) *MockClient_GetReservableMemoryBytes_Call { _c.Call.Return(run) return _c } diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go index ac5c8d24..8c51394e 100644 --- a/pkg/scope/cluster.go +++ b/pkg/scope/cluster.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/internal/tlshelper" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" capmox "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" @@ -50,7 +50,7 @@ type ClusterScopeParams struct { Client client.Client Logger *logr.Logger Cluster *clusterv1.Cluster - ProxmoxCluster *infrav1alpha1.ProxmoxCluster + ProxmoxCluster *infrav1.ProxmoxCluster ProxmoxClient capmox.Client ControllerName string IPAMHelper *ipam.Helper @@ -63,7 +63,7 @@ type ClusterScope struct { patchHelper *patch.Helper Cluster *clusterv1.Cluster - ProxmoxCluster *infrav1alpha1.ProxmoxCluster + ProxmoxCluster *infrav1.ProxmoxCluster ProxmoxClient capmox.Client controllerName string @@ -208,7 +208,7 @@ func (s *ClusterScope) PatchObject() error { // always update the readyCondition. conditions.SetSummary(s.ProxmoxCluster, conditions.WithConditions( - infrav1alpha1.ProxmoxClusterReady, + infrav1.ProxmoxClusterReady, ), ) @@ -216,8 +216,8 @@ func (s *ClusterScope) PatchObject() error { } // ListProxmoxMachinesForCluster returns all the ProxmoxMachines that belong to this cluster. 
-func (s *ClusterScope) ListProxmoxMachinesForCluster(ctx context.Context) ([]infrav1alpha1.ProxmoxMachine, error) { - var machineList infrav1alpha1.ProxmoxMachineList +func (s *ClusterScope) ListProxmoxMachinesForCluster(ctx context.Context) ([]infrav1.ProxmoxMachine, error) { + var machineList infrav1.ProxmoxMachineList err := s.client.List(ctx, &machineList, client.InNamespace(s.Namespace()), client.MatchingLabels{ clusterv1.ClusterNameLabel: s.Name(), diff --git a/pkg/scope/cluster_test.go b/pkg/scope/cluster_test.go index 48b99481..1d103769 100644 --- a/pkg/scope/cluster_test.go +++ b/pkg/scope/cluster_test.go @@ -31,7 +31,7 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest" @@ -44,11 +44,11 @@ func TestNewClusterScope_MissingParams(t *testing.T) { name string params ClusterScopeParams }{ - {"missing client", ClusterScopeParams{Cluster: &clusterv1.Cluster{}, ProxmoxCluster: &infrav1alpha1.ProxmoxCluster{}, ProxmoxClient: &goproxmox.APIClient{}, IPAMHelper: &ipam.Helper{}}}, - {"missing cluster", ClusterScopeParams{Client: k8sClient, ProxmoxCluster: &infrav1alpha1.ProxmoxCluster{}, ProxmoxClient: &goproxmox.APIClient{}, IPAMHelper: &ipam.Helper{}}}, + {"missing client", ClusterScopeParams{Cluster: &clusterv1.Cluster{}, ProxmoxCluster: &infrav1.ProxmoxCluster{}, ProxmoxClient: &goproxmox.APIClient{}, IPAMHelper: &ipam.Helper{}}}, + {"missing cluster", ClusterScopeParams{Client: k8sClient, ProxmoxCluster: &infrav1.ProxmoxCluster{}, ProxmoxClient: &goproxmox.APIClient{}, IPAMHelper: &ipam.Helper{}}}, {"missing proxmox 
cluster", ClusterScopeParams{Client: k8sClient, Cluster: &clusterv1.Cluster{}, ProxmoxClient: &goproxmox.APIClient{}, IPAMHelper: &ipam.Helper{}}}, - {"missing ipam helper", ClusterScopeParams{Client: k8sClient, Cluster: &clusterv1.Cluster{}, ProxmoxCluster: &infrav1alpha1.ProxmoxCluster{}, ProxmoxClient: &goproxmox.APIClient{}}}, - {"missing proxmox client", ClusterScopeParams{Client: k8sClient, Cluster: &clusterv1.Cluster{}, ProxmoxCluster: &infrav1alpha1.ProxmoxCluster{}, IPAMHelper: &ipam.Helper{}}}, + {"missing ipam helper", ClusterScopeParams{Client: k8sClient, Cluster: &clusterv1.Cluster{}, ProxmoxCluster: &infrav1.ProxmoxCluster{}, ProxmoxClient: &goproxmox.APIClient{}}}, + {"missing proxmox client", ClusterScopeParams{Client: k8sClient, Cluster: &clusterv1.Cluster{}, ProxmoxCluster: &infrav1.ProxmoxCluster{}, IPAMHelper: &ipam.Helper{}}}, } for _, test := range tests { @@ -62,16 +62,16 @@ func TestNewClusterScope_MissingParams(t *testing.T) { func TestNewClusterScope_MissingProxmoxClient(t *testing.T) { k8sClient := getFakeClient(t) - proxmoxCluster := &infrav1alpha1.ProxmoxCluster{ + proxmoxCluster := &infrav1.ProxmoxCluster{ TypeMeta: metav1.TypeMeta{ - APIVersion: infrav1alpha1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "ProxmoxCluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "proxmoxcluster", Namespace: "default", }, - Spec: infrav1alpha1.ProxmoxClusterSpec{ + Spec: infrav1.ProxmoxClusterSpec{ AllowedNodes: []string{"pve", "pve-2"}, }, } @@ -95,16 +95,16 @@ func TestNewClusterScope_MissingProxmoxClient(t *testing.T) { func TestNewClusterScope_SetupProxmoxClient(t *testing.T) { k8sClient := getFakeClient(t) - proxmoxCluster := &infrav1alpha1.ProxmoxCluster{ + proxmoxCluster := &infrav1.ProxmoxCluster{ TypeMeta: metav1.TypeMeta{ - APIVersion: infrav1alpha1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "ProxmoxCluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "proxmoxcluster", Namespace: 
"default", }, - Spec: infrav1alpha1.ProxmoxClusterSpec{ + Spec: infrav1.ProxmoxClusterSpec{ AllowedNodes: []string{"pve", "pve-2"}, CredentialsRef: &corev1.SecretReference{ Name: "test-secret", @@ -153,16 +153,16 @@ func TestListProxmoxMachinesForCluster(t *testing.T) { err := k8sClient.Create(context.Background(), cluster) require.NoError(t, err) - proxmoxCluster := &infrav1alpha1.ProxmoxCluster{ + proxmoxCluster := &infrav1.ProxmoxCluster{ TypeMeta: metav1.TypeMeta{ - APIVersion: infrav1alpha1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "ProxmoxCluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "proxmoxcluster", Namespace: "default", }, - Spec: infrav1alpha1.ProxmoxClusterSpec{ + Spec: infrav1.ProxmoxClusterSpec{ AllowedNodes: []string{"pve", "pve-2"}, CredentialsRef: &corev1.SecretReference{ Name: "test-secret", @@ -192,8 +192,8 @@ func TestListProxmoxMachinesForCluster(t *testing.T) { clusterScope, err := NewClusterScope(params) require.NoError(t, err) - expectedMachineList := &infrav1alpha1.ProxmoxMachineList{ - Items: []infrav1alpha1.ProxmoxMachine{ + expectedMachineList := &infrav1.ProxmoxMachineList{ + Items: []infrav1.ProxmoxMachine{ { ObjectMeta: metav1.ObjectMeta{ Name: "machine01", @@ -222,7 +222,7 @@ func TestListProxmoxMachinesForCluster(t *testing.T) { expectedMachineList.Items[machineIdx].ResourceVersion = "1" } - unexpectedMachine := &infrav1alpha1.ProxmoxMachine{ + unexpectedMachine := &infrav1.ProxmoxMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "other-cluster-machine01", Namespace: "default", @@ -248,7 +248,7 @@ func getFakeClient(t *testing.T) ctrlclient.Client { require.NoError(t, err) err = clusterv1.AddToScheme(scheme) require.NoError(t, err) - err = infrav1alpha1.AddToScheme(scheme) + err = infrav1.AddToScheme(scheme) require.NoError(t, err) return fake.NewClientBuilder().WithScheme(scheme).Build() diff --git a/pkg/scope/machine.go b/pkg/scope/machine.go index b1ba03d8..38c50a42 100644 --- a/pkg/scope/machine.go 
+++ b/pkg/scope/machine.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" ) @@ -45,7 +45,7 @@ type MachineScopeParams struct { Cluster *clusterv1.Cluster Machine *clusterv1.Machine InfraCluster *ClusterScope - ProxmoxMachine *infrav1alpha1.ProxmoxMachine + ProxmoxMachine *infrav1.ProxmoxMachine IPAMHelper *ipam.Helper } @@ -58,7 +58,7 @@ type MachineScope struct { Cluster *clusterv1.Cluster Machine *clusterv1.Machine InfraCluster *ClusterScope - ProxmoxMachine *infrav1alpha1.ProxmoxMachine + ProxmoxMachine *infrav1.ProxmoxMachine IPAMHelper *ipam.Helper VirtualMachine *proxmox.VirtualMachine } @@ -169,12 +169,12 @@ func (m *MachineScope) SetVirtualMachineID(vmID int64) { // SetReady sets the ProxmoxMachine Ready Status. func (m *MachineScope) SetReady() { - m.ProxmoxMachine.Status.Ready = true + m.ProxmoxMachine.Status.Ready = ptr.To(true) } // SetNotReady sets the ProxmoxMachine Ready Status to false. func (m *MachineScope) SetNotReady() { - m.ProxmoxMachine.Status.Ready = false + m.ProxmoxMachine.Status.Ready = ptr.To(false) } // SetFailureMessage sets the ProxmoxMachine status failure message. @@ -210,7 +210,7 @@ func (m *MachineScope) PatchObject() error { // always update the readyCondition. 
conditions.SetSummary(m.ProxmoxMachine, conditions.WithConditions( - infrav1alpha1.VMProvisionedCondition, + infrav1.VMProvisionedCondition, ), ) @@ -220,7 +220,7 @@ func (m *MachineScope) PatchObject() error { m.ProxmoxMachine, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ clusterv1.ReadyCondition, - infrav1alpha1.VMProvisionedCondition, + infrav1.VMProvisionedCondition, }}) } diff --git a/pkg/scope/machine_test.go b/pkg/scope/machine_test.go index a4b0247d..5df48467 100644 --- a/pkg/scope/machine_test.go +++ b/pkg/scope/machine_test.go @@ -28,7 +28,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" ) @@ -39,13 +39,13 @@ func TestNewMachineScope_MissingParams(t *testing.T) { name string params MachineScopeParams }{ - {"missing client", MachineScopeParams{Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1alpha1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, - {"missing machine", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1alpha1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, - {"missing cluster", MachineScopeParams{Client: client, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1alpha1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, + {"missing client", MachineScopeParams{Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, + {"missing machine", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: 
&infrav1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, + {"missing cluster", MachineScopeParams{Client: client, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, {"missing proxmox machine", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, IPAMHelper: &ipam.Helper{}}}, - {"missing proxmox cluster", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, ProxmoxMachine: &infrav1alpha1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, - {"missing ipam helper", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1alpha1.ProxmoxMachine{}}}, - {"missing scheme", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1alpha1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, + {"missing proxmox cluster", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, ProxmoxMachine: &infrav1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, + {"missing ipam helper", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1.ProxmoxMachine{}}}, + {"missing scheme", MachineScopeParams{Client: client, Cluster: &clusterv1.Cluster{}, Machine: &clusterv1.Machine{}, InfraCluster: &ClusterScope{}, ProxmoxMachine: &infrav1.ProxmoxMachine{}, IPAMHelper: &ipam.Helper{}}}, } for _, test := range tests { @@ -69,8 +69,8 @@ func TestMachineScope_Role(t *testing.T) { } func TestMachineScope_GetProviderID(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), 
} scope := MachineScope{ ProxmoxMachine: &p, @@ -83,8 +83,8 @@ func TestMachineScope_GetProviderID(t *testing.T) { } func TestMachineScope_GetVirtualMachineID(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), } scope := MachineScope{ ProxmoxMachine: &p, @@ -97,23 +97,23 @@ func TestMachineScope_GetVirtualMachineID(t *testing.T) { } func TestMachineScope_SetReady(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), } scope := MachineScope{ ProxmoxMachine: &p, } scope.SetReady() - require.True(t, scope.ProxmoxMachine.Status.Ready) + require.True(t, *scope.ProxmoxMachine.Status.Ready) scope.SetNotReady() - require.False(t, scope.ProxmoxMachine.Status.Ready) + require.False(t, *scope.ProxmoxMachine.Status.Ready) } func TestMachineScope_HasFailed(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), } scope := MachineScope{ ProxmoxMachine: &p, @@ -123,12 +123,12 @@ func TestMachineScope_HasFailed(t *testing.T) { } func TestMachineScope_SkipQemuCheckEnabled(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{ - Checks: &infrav1alpha1.ProxmoxMachineChecks{ + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + Checks: &infrav1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), }, - }, + }), } scope := MachineScope{ ProxmoxMachine: &p, @@ -138,8 +138,8 @@ func TestMachineScope_SkipQemuCheckEnabled(t *testing.T) { } func TestMachineScope_SkipQemuCheck(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), } scope := MachineScope{ 
ProxmoxMachine: &p, @@ -149,12 +149,12 @@ func TestMachineScope_SkipQemuCheck(t *testing.T) { } func TestMachineScope_SkipCloudInitCheckEnabled(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{ - Checks: &infrav1alpha1.ProxmoxMachineChecks{ + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + Checks: &infrav1.ProxmoxMachineChecks{ SkipCloudInitStatus: ptr.To(true), }, - }, + }), } scope := MachineScope{ ProxmoxMachine: &p, @@ -164,8 +164,8 @@ func TestMachineScope_SkipCloudInitCheckEnabled(t *testing.T) { } func TestMachineScope_SkipCloudInit(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), } scope := MachineScope{ ProxmoxMachine: &p, @@ -175,12 +175,12 @@ func TestMachineScope_SkipCloudInit(t *testing.T) { } func TestMachineScope_SkipQemuDisablesCloudInitCheck(t *testing.T) { - p := infrav1alpha1.ProxmoxMachine{ - Spec: infrav1alpha1.ProxmoxMachineSpec{ - Checks: &infrav1alpha1.ProxmoxMachineChecks{ + p := infrav1.ProxmoxMachine{ + Spec: ptr.To(infrav1.ProxmoxMachineSpec{ + Checks: &infrav1.ProxmoxMachineChecks{ SkipQemuGuestAgent: ptr.To(true), }, - }, + }), } scope := MachineScope{ ProxmoxMachine: &p, @@ -191,9 +191,9 @@ func TestMachineScope_SkipQemuDisablesCloudInitCheck(t *testing.T) { func TestMachineScope_GetBootstrapSecret(t *testing.T) { client := fake.NewClientBuilder().Build() - p := infrav1alpha1.ProxmoxMachine{ + p := infrav1.ProxmoxMachine{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, - Spec: infrav1alpha1.ProxmoxMachineSpec{}, + Spec: ptr.To(infrav1.ProxmoxMachineSpec{}), } m := clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, diff --git a/pkg/types/network.go b/pkg/types/network.go index 6f185c33..f41fa351 100644 --- a/pkg/types/network.go +++ b/pkg/types/network.go @@ -17,41 +17,35 @@ limitations under the 
License. // Package types provides common types used in cloudinit & ignition. package types +import infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" + // NetworkConfigData is used to render network-config. type NetworkConfigData struct { - ProxName string // Device name in Proxmox - MacAddress string - DHCP4 bool - DHCP6 bool - IPAddress string - IPV6Address string - Gateway string - Metric *uint32 - Gateway6 string - Metric6 *uint32 - DNSServers []string - Type string - Name string - Interfaces []string // Interfaces controlled by this one. - Table uint32 // linux routing table number for VRF. - Routes []RoutingData - FIBRules []FIBRuleData // Forwarding information block for routing. - LinkMTU *uint16 // linux network device MTU - VRF string // linux VRF name // only used in networkd config. + ProxName infrav1.NetName // Device name in Proxmox + MacAddress string + DHCP4 bool + DHCP6 bool + IPConfigs []IPConfig + DNSServers []string + Type string + Name string + Interfaces []string // Interfaces controlled by this one. + Table int32 // linux routing table number for VRF. + Routes []RoutingData + FIBRules []FIBRuleData // Forwarding information block for routing. + LinkMTU infrav1.MTU // linux network device MTU + VRF string // linux VRF name // only used in networkd config. } -// RoutingData stores routing configuration. -type RoutingData struct { - To string - Via string - Metric uint32 - Table uint32 +// IPConfig stores IP configuration. +type IPConfig struct { + IPAddress string + Gateway string + Metric *int32 } +// RoutingData stores routing configuration. +type RoutingData = infrav1.RouteSpec + // FIBRuleData stores forward information base rules (routing policies). 
-type FIBRuleData struct { - To string - From string - Priority uint32 - Table uint32 -} +type FIBRuleData = infrav1.RoutingPolicySpec diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index bbb2b5a6..a8d8ab27 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -44,7 +44,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" + infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha2" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox" )