diff --git a/go.mod b/go.mod index f3cd7c7bbac2737b980226543895e527e9024641..38fe94e026ec9c70f44d06f8edc9559a26c5864f 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( k8s.io/kubectl v0.30.0 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 sigs.k8s.io/cluster-api v1.6.1 - sigs.k8s.io/controller-runtime v0.18.3 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240522175850-2e9781e9fc60 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 32e7cfb2d57733ddebaa02221305f485bed99b23..b89efd6c20a7e24918b455cdc8aa655966faee72 100644 --- a/go.sum +++ b/go.sum @@ -778,8 +778,8 @@ mvdan.cc/unparam v0.0.0-20240427195214-063aff900ca1 h1:Nykk7fggxChwLK4rUPYESzeIw mvdan.cc/unparam v0.0.0-20240427195214-063aff900ca1/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= sigs.k8s.io/cluster-api v1.6.1 h1:I34p/fwgRlEhs+o9cUhKXDwNNfPS3no0yJsd2bJyQVc= sigs.k8s.io/cluster-api v1.6.1/go.mod h1:DaxwruDvSaEYq5q6FREDaGzX6UsAVUCA99Sp8vfMHyQ= -sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4= -sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240522175850-2e9781e9fc60 h1:ihaeBTCFuEYPL1T1/FqAavDY7z5UcKSnWpnb+I3DYeM= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240522175850-2e9781e9fc60/go.mod h1:4+4tM2Es0ycqPedATtzPer5RTrUq3Xab59BYogt0mCE= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/pkg/controller/machine/machine_controller_test.go b/pkg/controller/machine/machine_controller_test.go index f5271b119cccdf485a908b24f0b9a7a88d8743c3..d6044aeb14c9095aa6508cc2d476403de4b2c89e 100644 --- 
a/pkg/controller/machine/machine_controller_test.go +++ b/pkg/controller/machine/machine_controller_test.go @@ -31,8 +31,6 @@ import ( "github.com/openshift/cluster-control-plane-machine-set-operator/test/e2e/framework" testutils "github.com/openshift/machine-api-operator/pkg/util/testing" - apierrs "k8s.io/apimachinery/pkg/api/errors" - utilnet "k8s.io/apimachinery/pkg/util/net" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest/komega" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -144,11 +142,6 @@ var _ = Describe("Machine Reconciler", func() { HaveField("Status", Equal(corev1.ConditionTrue)), )))) - By("Transitioning the AuthoritativeAPI though 'Migrating' to MachineAPI") - Eventually(k.UpdateStatus(instance, func() { - instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMigrating - })).Should(Succeed()) - // The condition should remain true whilst transitioning through 'Migrating' // Run this in a goroutine so we don't block @@ -164,7 +157,7 @@ var _ = Describe("Machine Reconciler", func() { localInstance := instanceCopy.DeepCopy() if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(localInstance), localInstance); err != nil { - return g.Expect(err).Should(WithTransform(isRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err) + return g.Expect(err).Should(WithTransform(testutils.IsRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err) } return g.Expect(localInstance.Status.Conditions).Should(ContainElement(SatisfyAll( @@ -179,13 +172,18 @@ var _ = Describe("Machine Reconciler", func() { localInstance := instanceCopy.DeepCopy() if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(localInstance), localInstance); err != nil { - return g.Expect(err).Should(WithTransform(isRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err) + return g.Expect(err).Should(WithTransform(testutils.IsRetryableAPIError, BeTrue()), 
"expected temporary error while getting machine: %v", err) } - return g.Expect(localInstance.Status.AuthoritativeAPI).ToNot(Equal(machinev1.MachineAuthorityMachineAPI)) + return g.Expect(localInstance.Status.AuthoritativeAPI).To(Equal(machinev1.MachineAuthorityMachineAPI)) }) }() + By("Transitioning the AuthoritativeAPI though 'Migrating' to MachineAPI") + Eventually(k.UpdateStatus(instance, func() { + instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMigrating + })).Should(Succeed()) + By("Updating the AuthoritativeAPI from Migrating to MachineAPI") Eventually(k.UpdateStatus(instance, func() { instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMachineAPI @@ -220,21 +218,3 @@ func cleanResources(namespace string) error { return nil } - -// isRetryableAPIError returns whether an API error is retryable or not. -// inspired by: k8s.io/kubernetes/test/utils. -func isRetryableAPIError(err error) bool { - // These errors may indicate a transient error that we can retry in tests. - if apierrs.IsInternalError(err) || apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || - apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) || - utilnet.IsHTTP2ConnectionLost(err) { - return true - } - - // If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. 
- if _, shouldRetry := apierrs.SuggestsClientDelay(err); shouldRetry { - return true - } - - return false -} diff --git a/pkg/controller/machineset/controller.go b/pkg/controller/machineset/controller.go index 151c591b92c51604c8f8770d99d7c7490a655e54..346bd22a117b1e800c8ef83b68112825870f85af 100644 --- a/pkg/controller/machineset/controller.go +++ b/pkg/controller/machineset/controller.go @@ -25,8 +25,10 @@ import ( "sync" "time" + openshiftfeatures "github.com/openshift/api/features" machinev1 "github.com/openshift/api/machine/v1beta1" "github.com/openshift/machine-api-operator/pkg/util" + "github.com/openshift/machine-api-operator/pkg/util/conditions" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,6 +47,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) +const ( + PausedCondition machinev1.ConditionType = "Paused" + + PausedConditionReason = "AuthoritativeAPI is not set to MachineAPI" + + NotPausedConditionReason = "AuthoritativeAPI is set to MachineAPI" +) + var ( controllerKind = machinev1.SchemeGroupVersion.WithKind("MachineSet") @@ -170,6 +180,42 @@ func (r *ReconcileMachineSet) Reconcile(ctx context.Context, request reconcile.R return reconcile.Result{}, nil } + if r.gate.Enabled(featuregate.Feature(openshiftfeatures.FeatureGateMachineAPIMigration)) { + machineSetCopy := machineSet.DeepCopy() + // Check Status.AuthoritativeAPI. If it's not set to MachineAPI. Set the + // paused condition true and return early. + // + // Once we have a webhook, we want to remove the check that the AuthoritativeAPI + // field is populated. 
+ if machineSet.Status.AuthoritativeAPI != "" && + machineSet.Status.AuthoritativeAPI != machinev1.MachineAuthorityMachineAPI { + conditions.Set(machineSetCopy, conditions.TrueConditionWithReason( + PausedCondition, + PausedConditionReason, + "The AuthoritativeAPI is set to %s", machineSet.Status.AuthoritativeAPI, + )) + + _, err := updateMachineSetStatus(r.Client, machineSet, machineSetCopy.Status) + if err != nil { + klog.Errorf("%v: error updating status: %v", machineSet.Name, err) + } + + klog.Infof("%v: machine set is paused, taking no further action", machineSet.Name) + return reconcile.Result{}, nil + } + + conditions.Set(machineSetCopy, conditions.FalseCondition( + PausedCondition, + NotPausedConditionReason, + "The AuthoritativeAPI is set to %s", string(machineSet.Status.AuthoritativeAPI), + )) + _, err := updateMachineSetStatus(r.Client, machineSet, machineSetCopy.Status) + if err != nil { + klog.Errorf("%v: error updating status: %v", machineSet.Name, err) + } + klog.Infof("%v: setting paused to false and continuing reconcile", machineSet.Name) + } + result, err := r.reconcile(ctx, machineSet) if err != nil { klog.Errorf("Failed to reconcile MachineSet %q: %v", request.NamespacedName, err) diff --git a/pkg/controller/machineset/controller_test.go b/pkg/controller/machineset/controller_test.go index 03450d058f411dce7ea81d4923b6263a9dbfb2ad..481c5c535f99801dd19c9c65d78d4e8883601543 100644 --- a/pkg/controller/machineset/controller_test.go +++ b/pkg/controller/machineset/controller_test.go @@ -57,6 +57,9 @@ func TestMachineSetToMachines(t *testing.T) { }, }, }, + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, }, }, } @@ -153,7 +156,11 @@ func TestShouldExcludeMachine(t *testing.T) { expected bool }{ { - machineSet: machinev1.MachineSet{}, + machineSet: machinev1.MachineSet{ + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, + }, machine: machinev1.Machine{ 
ObjectMeta: metav1.ObjectMeta{ Name: "withNoMatchingOwnerRef", @@ -178,6 +185,9 @@ func TestShouldExcludeMachine(t *testing.T) { }, }, }, + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, }, machine: machinev1.Machine{ ObjectMeta: metav1.ObjectMeta{ @@ -191,7 +201,11 @@ func TestShouldExcludeMachine(t *testing.T) { expected: false, }, { - machineSet: machinev1.MachineSet{}, + machineSet: machinev1.MachineSet{ + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, + }, machine: machinev1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "withDeletionTimestamp", @@ -225,6 +239,9 @@ func TestAdoptOrphan(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "adoptOrphanMachine", }, + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, } controller := true blockOwnerDeletion := true @@ -306,7 +323,11 @@ var _ = Describe("MachineSet Reconcile", func() { }, Spec: machinev1.MachineSetSpec{ Template: machinev1.MachineTemplateSpec{}, - }} + }, + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, + } r.Client = fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(ms).WithStatusSubresource(&machinev1.MachineSet{}).Build() }) @@ -335,6 +356,9 @@ var _ = Describe("MachineSet Reconcile", func() { Spec: machinev1.MachineSetSpec{ Replicas: &replicas, }, + Status: machinev1.MachineSetStatus{ + AuthoritativeAPI: machinev1.MachineAuthorityMachineAPI, + }, } ms.Spec.Selector.MatchLabels = map[string]string{ diff --git a/pkg/controller/machineset/machineset_controller_suite_test.go b/pkg/controller/machineset/machineset_controller_suite_test.go index a2f33a480fab03bc94691cb44dfb0c608f7a0dcd..afc9ff473d8082a6c72d518ccc8f0401d3c3f35c 100644 --- a/pkg/controller/machineset/machineset_controller_suite_test.go +++ b/pkg/controller/machineset/machineset_controller_suite_test.go @@ -19,13 
+19,17 @@ package machineset import ( "context" "flag" + "log" + "os" "path/filepath" "testing" "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + configv1 "github.com/openshift/api/config/v1" machinev1 "github.com/openshift/api/machine/v1beta1" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2/textlogger" @@ -38,9 +42,6 @@ func init() { textLoggerConfig := textlogger.NewConfig() textLoggerConfig.AddFlags(flag.CommandLine) logf.SetLogger(textlogger.NewLogger(textLoggerConfig)) - - // Register required object kinds with global scheme. - _ = machinev1.Install(scheme.Scheme) } const ( @@ -62,18 +63,38 @@ func TestMachinesetController(t *testing.T) { var _ = BeforeSuite(func(ctx SpecContext) { By("bootstrapping test environment") + testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "install")}, + + CRDInstallOptions: envtest.CRDInstallOptions{ + Paths: []string{ + filepath.Join("..", "..", "..", "vendor", "github.com", "openshift", "api", "machine", "v1beta1", "zz_generated.crd-manifests", "0000_10_machine-api_01_machines-CustomNoUpgrade.crd.yaml"), + filepath.Join("..", "..", "..", "vendor", "github.com", "openshift", "api", "machine", "v1beta1", "zz_generated.crd-manifests", "0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml"), + filepath.Join("..", "..", "..", "vendor", "github.com", "openshift", "api", "config", "v1", "zz_generated.crd-manifests", "0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml"), + filepath.Join("..", "..", "..", "vendor", "github.com", "openshift", "api", "config", "v1", "zz_generated.crd-manifests", "0000_10_config-operator_01_featuregates.crd.yaml"), + }, + }, } var err error cfg, err = testEnv.Start() Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) - }) var _ = AfterSuite(func() { By("tearing down the test environment") Expect(testEnv.Stop()).To(Succeed()) }) + +func TestMain(m *testing.M) { + // Register 
required object kinds with global scheme. + if err := machinev1.Install(scheme.Scheme); err != nil { + log.Fatalf("cannot add scheme: %v", err) + } + if err := configv1.Install(scheme.Scheme); err != nil { + log.Fatalf("cannot add scheme: %v", err) + } + exitVal := m.Run() + os.Exit(exitVal) +} diff --git a/pkg/controller/machineset/machineset_controller_test.go b/pkg/controller/machineset/machineset_controller_test.go index be1550860141a7db1e12059745eb9e4ae9a6061d..e241acaadec7138e3f02fdae99f6a343ccfd4d07 100644 --- a/pkg/controller/machineset/machineset_controller_test.go +++ b/pkg/controller/machineset/machineset_controller_test.go @@ -25,11 +25,13 @@ import ( machinev1 "github.com/openshift/api/machine/v1beta1" machinev1resourcebuilder "github.com/openshift/cluster-api-actuator-pkg/testutils/resourcebuilder/machine/v1beta1" - testutils "github.com/openshift/machine-api-operator/pkg/util/testing" + "github.com/openshift/cluster-control-plane-machine-set-operator/test/e2e/framework" + testutils "github.com/openshift/machine-api-operator/pkg/util/testing" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -38,6 +40,7 @@ import ( var _ = Describe("MachineSet Reconciler", func() { var mgrCtxCancel context.CancelFunc var mgrStopped chan struct{} + var k komega.Komega var namespace *corev1.Namespace var machineSetBuilder machinev1resourcebuilder.MachineSetBuilder var replicas int32 = int32(2) @@ -52,6 +55,7 @@ var _ = Describe("MachineSet Reconciler", func() { Expect(err).NotTo(HaveOccurred()) k8sClient = mgr.GetClient() + k = komega.New(k8sClient) By("Setting up feature gates") gate, err := testutils.NewDefaultMutableFeatureGate() @@ -105,6 +109,11 @@ var _ = Describe("MachineSet Reconciler", func() { By("Creating the MachineSet") 
Expect(k8sClient.Create(ctx, instance)).To(Succeed()) + By("Setting the AuthoritativeAPI to MachineAPI") + Eventually(k.UpdateStatus(instance, func() { + instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMachineAPI + })).Should(Succeed()) + machines := &machinev1.MachineList{} By("Verifying that we have 2 replicas") Eventually(func() (int, error) { @@ -133,6 +142,96 @@ var _ = Describe("MachineSet Reconciler", func() { return ready, nil }, timeout*3).Should(BeEquivalentTo(replicas)) }) + + It("Should set the Paused condition appropriately", func() { + instance := machineSetBuilder. + WithName("baz"). + WithLabels(map[string]string{"baz": "bar"}). + Build() + + By("Creating the MachineSet") + Expect(k8sClient.Create(ctx, instance)).To(Succeed()) + + By("Setting the AuthoritativeAPI to ClusterAPI") + Eventually(k.UpdateStatus(instance, func() { + instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityClusterAPI + })).Should(Succeed()) + + By("Verifying that the AuthoritativeAPI is set to Cluster API") + Eventually(k.Object(instance), timeout).Should(HaveField("Status.AuthoritativeAPI", Equal(machinev1.MachineAuthorityClusterAPI))) + + By("Verifying the paused condition is appropriately set to true") + Eventually(k.Object(instance), timeout).Should(HaveField("Status.Conditions", ContainElement(SatisfyAll( + HaveField("Type", Equal(PausedCondition)), + HaveField("Status", Equal(corev1.ConditionTrue)), + )))) + + // The condition should remain true whilst transitioning through 'Migrating' + // Run this in a goroutine so we don't block + + // Copy the instance before starting the goroutine, to avoid data races + instanceCopy := instance.DeepCopy() + go func() { + defer GinkgoRecover() + framework.RunCheckUntil( + ctx, + // Check that we consistently have the Paused condition true + func(_ context.Context, g framework.GomegaAssertions) bool { + By("Checking that the paused condition is consistently true whilst migrating to MachineAPI") + + localInstance 
:= instanceCopy.DeepCopy() + if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(localInstance), localInstance); err != nil { + return g.Expect(err).Should(WithTransform(testutils.IsRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err) + } + + return g.Expect(localInstance.Status.Conditions).Should(ContainElement(SatisfyAll( + HaveField("Type", Equal(PausedCondition)), + HaveField("Status", Equal(corev1.ConditionTrue)), + ))) + + }, + // Condition / until function: until we observe the MachineAuthority being MAPI + func(_ context.Context, g framework.GomegaAssertions) bool { + By("Checking that the AuthoritativeAPI is MachineAPI") + + localInstance := instanceCopy.DeepCopy() + if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(localInstance), localInstance); err != nil { + return g.Expect(err).Should(WithTransform(testutils.IsRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err) + } + + return g.Expect(localInstance.Status.AuthoritativeAPI).To(Equal(machinev1.MachineAuthorityMachineAPI)) + }) + }() + + By("Transitioning the AuthoritativeAPI through Migrating") + Eventually(k.UpdateStatus(instance, func() { + instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMigrating + })).Should(Succeed()) + + By("Updating the AuthoritativeAPI from Migrating to MachineAPI") + Eventually(k.UpdateStatus(instance, func() { + instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMachineAPI + })).Should(Succeed()) + + Eventually(k.Object(instance), timeout).Should(HaveField("Status.AuthoritativeAPI", Equal(machinev1.MachineAuthorityMachineAPI))) + + By("Verifying the paused condition is appropriately set to false") + Eventually(k.Object(instance), timeout).Should(HaveField("Status.Conditions", ContainElement(SatisfyAll( + HaveField("Type", Equal(PausedCondition)), + HaveField("Status", Equal(corev1.ConditionFalse)), + )))) + + By("Unsetting the AuthoritativeAPI field in the status") + 
Eventually(k.UpdateStatus(instance, func() { + instance.Status.AuthoritativeAPI = "" + })).Should(Succeed()) + + By("Verifying the paused condition is still appropriately set to false") + Eventually(k.Object(instance), timeout).Should(HaveField("Status.Conditions", ContainElement(SatisfyAll( + HaveField("Type", Equal(PausedCondition)), + HaveField("Status", Equal(corev1.ConditionFalse)), + )))) + }) }) func cleanResources(namespace string) error { diff --git a/pkg/controller/machineset/status.go b/pkg/controller/machineset/status.go index 430a1b87347851ae22cbb1b7de464c9f89134d63..8332469f63bc8d65babe41c06aea10cb6e7c0d72 100644 --- a/pkg/controller/machineset/status.go +++ b/pkg/controller/machineset/status.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "reflect" machinev1 "github.com/openshift/api/machine/v1beta1" corev1 "k8s.io/api/core/v1" @@ -78,6 +79,7 @@ func updateMachineSetStatus(c client.Client, ms *machinev1.MachineSet, newStatus ms.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas && ms.Status.ReadyReplicas == newStatus.ReadyReplicas && ms.Status.AvailableReplicas == newStatus.AvailableReplicas && + reflect.DeepEqual(ms.Status.Conditions, newStatus.Conditions) && ms.Generation == ms.Status.ObservedGeneration { return ms, nil } @@ -99,7 +101,8 @@ func updateMachineSetStatus(c client.Client, ms *machinev1.MachineSet, newStatus fmt.Sprintf("fullyLabeledReplicas %d->%d, ", ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + fmt.Sprintf("readyReplicas %d->%d, ", ms.Status.ReadyReplicas, newStatus.ReadyReplicas) + fmt.Sprintf("availableReplicas %d->%d, ", ms.Status.AvailableReplicas, newStatus.AvailableReplicas) + - fmt.Sprintf("sequence No: %v->%v", ms.Status.ObservedGeneration, newStatus.ObservedGeneration)) + fmt.Sprintf("sequence No: %v->%v, ", ms.Status.ObservedGeneration, newStatus.ObservedGeneration) + + fmt.Sprintf("conditions: %v->%v", ms.Status.Conditions, newStatus.Conditions)) ms.Status = newStatus updateErr = 
c.Status().Update(context.Background(), ms) diff --git a/pkg/util/conditions/gettersetter.go b/pkg/util/conditions/gettersetter.go index 05a66ad5909e98993245349297cfe139934149d8..d2a3eafb464b2b3c7ef066e8bd88ea9dbabe94bd 100644 --- a/pkg/util/conditions/gettersetter.go +++ b/pkg/util/conditions/gettersetter.go @@ -171,6 +171,8 @@ func getWrapperObject(from interface{}) GetterSetter { switch obj := from.(type) { case *machinev1.Machine: return &MachineWrapper{obj} + case *machinev1.MachineSet: + return &MachineSetWrapper{obj} case *machinev1.MachineHealthCheck: return &MachineHealthCheckWrapper{obj} default: diff --git a/pkg/util/conditions/wrap.go b/pkg/util/conditions/wrap.go index 9eb587b55b35d9a14d9518d61198a6ecde69a877..5f72d1e1523e5adc9d5ac4db6d5fe9fcbcc8ddef 100644 --- a/pkg/util/conditions/wrap.go +++ b/pkg/util/conditions/wrap.go @@ -27,3 +27,15 @@ func (m *MachineHealthCheckWrapper) GetConditions() []machinev1.Condition { func (m *MachineHealthCheckWrapper) SetConditions(conditions []machinev1.Condition) { m.Status.Conditions = conditions } + +type MachineSetWrapper struct { + *machinev1.MachineSet +} + +func (m *MachineSetWrapper) GetConditions() []machinev1.Condition { + return m.Status.Conditions +} + +func (m *MachineSetWrapper) SetConditions(conditions []machinev1.Condition) { + m.Status.Conditions = conditions +} diff --git a/pkg/util/testing/testing.go b/pkg/util/testing/testing.go index 9618851c7db1f3b135de37730f3de3095ff7847f..9eeef087fe95a1ad506ff5c951010d2ee29cbfd1 100644 --- a/pkg/util/testing/testing.go +++ b/pkg/util/testing/testing.go @@ -9,8 +9,10 @@ import ( openshiftfeatures "github.com/openshift/api/features" "github.com/openshift/library-go/pkg/features" corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/uuid" 
"k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" @@ -188,3 +190,21 @@ func NewDefaultMutableFeatureGate() (featuregate.MutableFeatureGate, error) { return defaultMutableGate, nil } + +// IsRetryableAPIError returns whether an API error is retryable or not. +// inspired by: k8s.io/kubernetes/test/utils. +func IsRetryableAPIError(err error) bool { + // These errors may indicate a transient error that we can retry in tests. + if apierrs.IsInternalError(err) || apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || + apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) || + utilnet.IsHTTP2ConnectionLost(err) { + return true + } + + // If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. + if _, shouldRetry := apierrs.SuggestsClientDelay(err); shouldRetry { + return true + } + + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 616201649cfef616cf9e6e3a32cabf6e3d6c9d50..9877d0d034286ac66a56a623c955fb44ced1d5f7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1982,7 +1982,7 @@ mvdan.cc/unparam/check sigs.k8s.io/cluster-api/api/v1beta1 sigs.k8s.io/cluster-api/errors sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1 -# sigs.k8s.io/controller-runtime v0.18.3 +# sigs.k8s.io/controller-runtime v0.18.4 ## explicit; go 1.22.0 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index 05153f74ce7cef7864fb3b5bdd89c4a3eaf6a55a..176ce0db0f1467112267a31f85e34e4e39409d2e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -52,12 +52,22 @@ func newAlreadyOwnedError(obj metav1.Object, owner 
metav1.OwnerReference) *Alrea } } +// OwnerReferenceOption is a function that can modify a `metav1.OwnerReference`. +type OwnerReferenceOption func(*metav1.OwnerReference) + +// WithBlockOwnerDeletion allows configuring the BlockOwnerDeletion field on the `metav1.OwnerReference`. +func WithBlockOwnerDeletion(blockOwnerDeletion bool) OwnerReferenceOption { + return func(ref *metav1.OwnerReference) { + ref.BlockOwnerDeletion = &blockOwnerDeletion + } +} + // SetControllerReference sets owner as a Controller OwnerReference on controlled. // This is used for garbage collection of the controlled object and for // reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner). // Since only one OwnerReference can be a controller, it returns an error if // there is another OwnerReference with Controller flag set. -func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error { +func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme, opts ...OwnerReferenceOption) error { // Validate the owner. ro, ok := owner.(runtime.Object) if !ok { @@ -80,6 +90,9 @@ func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Sch BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true), } + for _, opt := range opts { + opt(&ref) + } // Return early with an error if the object is already controlled. if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { @@ -94,7 +107,7 @@ func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Sch // SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. // This allows you to declare that owner has a dependency on the object without specifying it as a controller. // If a reference to the same object already exists, it'll be overwritten with the newly provided version. 
-func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme, opts ...OwnerReferenceOption) error { // Validate the owner. ro, ok := owner.(runtime.Object) if !ok { @@ -115,6 +128,9 @@ func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) erro UID: owner.GetUID(), Name: owner.GetName(), } + for _, opt := range opts { + opt(&ref) + } // Update owner references and return. upsertOwnerRef(ref, object)