diff --git a/pkg/controller/machine/controller_test.go b/pkg/controller/machine/controller_test.go
index 930287c2c65b52a8036a87301d9beb39a3723085..a180f10eff7e49369a48106a78f8f40a7a13c1c8 100644
--- a/pkg/controller/machine/controller_test.go
+++ b/pkg/controller/machine/controller_test.go
@@ -27,6 +27,8 @@ import (
 	. "github.com/onsi/gomega"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
 	"github.com/openshift/machine-api-operator/pkg/util/conditions"
+	testutils "github.com/openshift/machine-api-operator/pkg/util/testing"
+
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -534,6 +536,10 @@ func TestReconcileRequest(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.request.Name, func(t *testing.T) {
+			gate, err := testutils.NewDefaultMutableFeatureGate()
+			if err != nil {
+				t.Errorf("Case: %s. Unexpected error setting up feature gates: %v", tc.request.Name, err)
+			}
 			act := newTestActuator()
 			act.ExistsValue = tc.existsValue
 			r := &ReconcileMachine{
@@ -551,6 +557,7 @@ func TestReconcileRequest(t *testing.T) {
 				).WithStatusSubresource(&machinev1.Machine{}).Build(),
 				scheme:   scheme.Scheme,
 				actuator: act,
+				gate:     gate,
 			}
 
 			result, err := r.Reconcile(ctx, tc.request)
@@ -596,9 +603,6 @@ func TestReconcileRequest(t *testing.T) {
 }
 
 func TestUpdateStatus(t *testing.T) {
-	cleanupFn := StartEnvTest(t)
-	defer cleanupFn(t)
-
 	drainableTrue := conditions.TrueCondition(machinev1.MachineDrainable)
 	terminableTrue := conditions.TrueCondition(machinev1.MachineTerminable)
 	defaultLifecycleConditions := []machinev1.Condition{*drainableTrue, *terminableTrue}
@@ -711,12 +715,19 @@ func TestUpdateStatus(t *testing.T) {
 		},
 	}
 
+	// We don't need to recreate the test environment for every case.
+	g := NewWithT(t)
+	_, testEnv, err := StartEnvTest()
+	g.Expect(err).ToNot(HaveOccurred())
+	defer func() {
+		g.Expect(testEnv.Stop()).To(Succeed())
+	}()
+
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			g := NewWithT(t)
-
+			gs := NewWithT(t)
 			k8sClient, err := client.New(cfg, client.Options{})
-			g.Expect(err).ToNot(HaveOccurred())
+			gs.Expect(err).ToNot(HaveOccurred())
 			reconciler := &ReconcileMachine{
 				Client: k8sClient,
 				scheme: scheme.Scheme,
@@ -729,7 +740,7 @@ func TestUpdateStatus(t *testing.T) {
 					GenerateName: name,
 				},
 			}
-			g.Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
+			gs.Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
 
 			// Set up the test machine
 			machine := &machinev1.Machine{
@@ -739,7 +750,7 @@ func TestUpdateStatus(t *testing.T) {
 				},
 			}
 
-			g.Expect(k8sClient.Create(ctx, machine)).To(Succeed())
+			gs.Expect(k8sClient.Create(ctx, machine)).To(Succeed())
 			defer func() {
 				if err := k8sClient.Delete(ctx, machine); err != nil {
 					t.Fatalf("error deleting machine: %v", err)
@@ -752,7 +763,7 @@ func TestUpdateStatus(t *testing.T) {
 				}
 			}
 
-			g.Expect(k8sClient.Status().Update(ctx, machine)).To(Succeed())
+			gs.Expect(k8sClient.Status().Update(ctx, machine)).To(Succeed())
 
 			namespacedName := types.NamespacedName{
 				Namespace: machine.Namespace,
@@ -765,20 +776,20 @@ func TestUpdateStatus(t *testing.T) {
 			}
 
 			// Set the phase to Running initially
-			g.Expect(reconciler.updateStatus(context.TODO(), machine, machinev1.PhaseRunning, nil, []machinev1.Condition{})).To(Succeed())
+			gs.Expect(reconciler.updateStatus(context.TODO(), machine, machinev1.PhaseRunning, nil, []machinev1.Condition{})).To(Succeed())
 			// validate persisted object
 			got := machinev1.Machine{}
-			g.Expect(reconciler.Client.Get(context.TODO(), namespacedName, &got)).To(Succeed())
-			g.Expect(got.Status.Phase).ToNot(BeNil())
-			g.Expect(*got.Status.Phase).To(Equal(machinev1.PhaseRunning))
+			gs.Expect(reconciler.Client.Get(context.TODO(), namespacedName, &got)).To(Succeed())
+			gs.Expect(got.Status.Phase).ToNot(BeNil())
+			gs.Expect(*got.Status.Phase).To(Equal(machinev1.PhaseRunning))
 			lastUpdated := got.Status.LastUpdated
 			gotConditions := got.Status.Conditions
-			g.Expect(lastUpdated).ToNot(BeNil())
+			gs.Expect(lastUpdated).ToNot(BeNil())
 			// validate passed object
-			g.Expect(machine.Status.Phase).ToNot(BeNil())
-			g.Expect(*machine.Status.Phase).To(Equal(machinev1.PhaseRunning))
+			gs.Expect(machine.Status.Phase).ToNot(BeNil())
+			gs.Expect(*machine.Status.Phase).To(Equal(machinev1.PhaseRunning))
 			objectLastUpdated := machine.Status.LastUpdated
-			g.Expect(objectLastUpdated).ToNot(BeNil())
+			gs.Expect(objectLastUpdated).ToNot(BeNil())
 
 			// Set the time func so that we can check lastUpdated is set correctly
 			reconciler.nowFunc = func() time.Time {
@@ -790,38 +801,38 @@ func TestUpdateStatus(t *testing.T) {
 				c := cond
 				conditions.Set(machine, &c)
 			}
-			g.Expect(reconciler.updateStatus(context.TODO(), machine, tc.phase, tc.err, gotConditions)).To(Succeed())
+			gs.Expect(reconciler.updateStatus(context.TODO(), machine, tc.phase, tc.err, gotConditions)).To(Succeed())
 			// validate the persisted object
 			got = machinev1.Machine{}
-			g.Expect(reconciler.Client.Get(context.TODO(), namespacedName, &got)).To(Succeed())
+			gs.Expect(reconciler.Client.Get(context.TODO(), namespacedName, &got)).To(Succeed())
 
 			if tc.updated {
-				g.Expect(got.Status.LastUpdated.UnixNano()).ToNot(Equal(lastUpdated.UnixNano()))
-				g.Expect(machine.Status.LastUpdated.UnixNano()).ToNot(Equal(objectLastUpdated.UnixNano()))
+				gs.Expect(got.Status.LastUpdated.UnixNano()).ToNot(Equal(lastUpdated.UnixNano()))
+				gs.Expect(machine.Status.LastUpdated.UnixNano()).ToNot(Equal(objectLastUpdated.UnixNano()))
 			} else {
-				g.Expect(got.Status.LastUpdated.UnixNano()).To(Equal(lastUpdated.UnixNano()))
-				g.Expect(machine.Status.LastUpdated.UnixNano()).To(Equal(objectLastUpdated.UnixNano()))
+				gs.Expect(got.Status.LastUpdated.UnixNano()).To(Equal(lastUpdated.UnixNano()))
+				gs.Expect(machine.Status.LastUpdated.UnixNano()).To(Equal(objectLastUpdated.UnixNano()))
 			}
 
 			if tc.err != nil {
-				g.Expect(got.Status.ErrorMessage).ToNot(BeNil())
-				g.Expect(*got.Status.ErrorMessage).To(Equal(tc.err.Error()))
-				g.Expect(machine.Status.ErrorMessage).ToNot(BeNil())
-				g.Expect(*machine.Status.ErrorMessage).To(Equal(tc.err.Error()))
+				gs.Expect(got.Status.ErrorMessage).ToNot(BeNil())
+				gs.Expect(*got.Status.ErrorMessage).To(Equal(tc.err.Error()))
+				gs.Expect(machine.Status.ErrorMessage).ToNot(BeNil())
+				gs.Expect(*machine.Status.ErrorMessage).To(Equal(tc.err.Error()))
 			}
 
-			g.Expect(*got.Status.Phase).To(Equal(tc.phase))
-			g.Expect(*machine.Status.Phase).To(Equal(tc.phase))
+			gs.Expect(*got.Status.Phase).To(Equal(tc.phase))
+			gs.Expect(*machine.Status.Phase).To(Equal(tc.phase))
 
-			g.Expect(got.Status.Conditions).To(conditions.MatchConditions(tc.conditions))
-			g.Expect(machine.Status.Conditions).To(conditions.MatchConditions(tc.conditions))
+			gs.Expect(got.Status.Conditions).To(conditions.MatchConditions(tc.conditions))
+			gs.Expect(machine.Status.Conditions).To(conditions.MatchConditions(tc.conditions))
 
-			g.Expect(got.GetAnnotations()).To(Equal(tc.annotations))
-			g.Expect(machine.GetAnnotations()).To(Equal(tc.annotations))
+			gs.Expect(got.GetAnnotations()).To(Equal(tc.annotations))
+			gs.Expect(machine.GetAnnotations()).To(Equal(tc.annotations))
 
 			if tc.existingProviderStatus != "" {
-				g.Expect(got.Status.ProviderStatus).ToNot(BeNil())
-				g.Expect(got.Status.ProviderStatus.Raw).To(BeEquivalentTo(tc.expectedProviderStatus))
+				gs.Expect(got.Status.ProviderStatus).ToNot(BeNil())
+				gs.Expect(got.Status.ProviderStatus.Raw).To(BeEquivalentTo(tc.expectedProviderStatus))
 			}
 		})
 	}
diff --git a/pkg/controller/machine/machine_controller_suite_test.go b/pkg/controller/machine/machine_controller_suite_test.go
index ec5d378414f41ef844a7dae25c01620f62d20bd4..407bbd9a597c7a32259e8cdd6a5f039f726a86dc 100644
--- a/pkg/controller/machine/machine_controller_suite_test.go
+++ b/pkg/controller/machine/machine_controller_suite_test.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"flag"
 	"log"
+	"os"
 	"path/filepath"
 	"testing"
 	"time"
@@ -27,6 +28,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
+	configv1 "github.com/openshift/api/config/v1"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
@@ -40,9 +42,6 @@ func init() {
 	textLoggerConfig := textlogger.NewConfig()
 	textLoggerConfig.AddFlags(flag.CommandLine)
 	logf.SetLogger(textlogger.NewLogger(textLoggerConfig))
-
-	// Register required object kinds with global scheme.
-	_ = machinev1.Install(scheme.Scheme)
 }
 
 const (
@@ -64,17 +63,8 @@ func TestMachineController(t *testing.T) {
 
 var _ = BeforeSuite(func(ctx SpecContext) {
 	By("bootstrapping test environment")
-	testEnv = &envtest.Environment{
-
-		CRDInstallOptions: envtest.CRDInstallOptions{
-			Paths: []string{
-				filepath.Join("..", "..", "..", "vendor", "github.com", "openshift", "api", "machine", "v1beta1", "zz_generated.crd-manifests", "0000_10_machine-api_01_machines-CustomNoUpgrade.crd.yaml"),
-			},
-		},
-	}
-
 	var err error
-	cfg, err = testEnv.Start()
+	cfg, testEnv, err = StartEnvTest()
 	Expect(err).ToNot(HaveOccurred())
 	Expect(cfg).ToNot(BeNil())
 
@@ -85,7 +75,19 @@ var _ = AfterSuite(func() {
 	Expect(testEnv.Stop()).To(Succeed())
 })
 
-func StartEnvTest(t *testing.T) func(t *testing.T) {
+func TestMain(m *testing.M) {
+	// Register required object kinds with global scheme.
+	if err := machinev1.Install(scheme.Scheme); err != nil {
+		log.Fatalf("cannot add scheme: %v", err)
+	}
+	if err := configv1.Install(scheme.Scheme); err != nil {
+		log.Fatalf("cannot add scheme: %v", err)
+	}
+	exitVal := m.Run()
+	os.Exit(exitVal)
+}
+
+func StartEnvTest() (*rest.Config, *envtest.Environment, error) {
 	testEnv := &envtest.Environment{
 
 		CRDInstallOptions: envtest.CRDInstallOptions{
@@ -94,18 +96,11 @@ func StartEnvTest(t *testing.T) func(t *testing.T) {
 			},
 		},
 	}
-	if err := machinev1.Install(scheme.Scheme); err != nil {
-		log.Fatalf("cannot add scheme: %v", err)
-	}
 
 	var err error
 	if cfg, err = testEnv.Start(); err != nil {
-		log.Fatal(err)
+		return nil, nil, err
 	}
 
-	return func(t *testing.T) {
-		if err = testEnv.Stop(); err != nil {
-			log.Fatal(err)
-		}
-	}
+	return cfg, testEnv, nil
 }
diff --git a/pkg/controller/machine/machine_controller_test.go b/pkg/controller/machine/machine_controller_test.go
index ac67608ca709ddd507703daccfd37cab57427b54..f5271b119cccdf485a908b24f0b9a7a88d8743c3 100644
--- a/pkg/controller/machine/machine_controller_test.go
+++ b/pkg/controller/machine/machine_controller_test.go
@@ -23,27 +23,28 @@ import (
 	. "github.com/onsi/gomega"
 
 	machinev1 "github.com/openshift/api/machine/v1beta1"
-	"github.com/openshift/machine-api-operator/pkg/util/conditions"
 	"golang.org/x/net/context"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 
+	machinev1resourcebuilder "github.com/openshift/cluster-api-actuator-pkg/testutils/resourcebuilder/machine/v1beta1"
+	"github.com/openshift/cluster-control-plane-machine-set-operator/test/e2e/framework"
+	testutils "github.com/openshift/machine-api-operator/pkg/util/testing"
+
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
-
-	testutils "github.com/openshift/machine-api-operator/pkg/util/testing"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
-var _ = Describe("MachineSet Reconciler", func() {
+var _ = Describe("Machine Reconciler", func() {
 	var mgrCtxCancel context.CancelFunc
 	var mgrStopped chan struct{}
 	var k komega.Komega
 	var namespace *corev1.Namespace
-	var reconciler reconcile.Reconciler
+	var machineBuilder machinev1resourcebuilder.MachineBuilder
 
 	BeforeEach(func() {
 		By("Setting up a new manager")
@@ -63,10 +64,9 @@ var _ = Describe("MachineSet Reconciler", func() {
 
 		By("Setting up a new reconciler")
 		act := newTestActuator()
-		reconciler = newReconciler(mgr, act, gate)
+		reconciler := newReconciler(mgr, act, gate)
 
-		err = add(mgr, reconciler, "testing")
-		Expect(err).NotTo(HaveOccurred())
+		Expect(add(mgr, reconciler, "testing")).To(Succeed())
 
 		var mgrCtx context.Context
 		mgrCtx, mgrCtxCancel = context.WithCancel(ctx)
@@ -81,13 +81,20 @@ var _ = Describe("MachineSet Reconciler", func() {
 		}()
 
 		By("Creating the namespace")
-		namespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ms-test"}}
+		namespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "machine-test"}}
 		Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
+
+		By("Setting up the machine builder")
+		machineBuilder = machinev1resourcebuilder.Machine().
+			WithNamespace(namespace.ObjectMeta.Name).
+			WithGenerateName("foo").
+			WithProviderSpecBuilder(machinev1resourcebuilder.GCPProviderSpec())
+
 	})
 
 	AfterEach(func() {
-		By("Deleting the machinesets")
-		Expect(cleanResources()).To(Succeed())
+		By("Deleting the machines")
+		Expect(cleanResources(namespace.Name)).To(Succeed())
 
 		By("Deleting the namespace")
 		Expect(k8sClient.Delete(ctx, namespace)).To(Succeed())
@@ -98,22 +105,7 @@ var _ = Describe("MachineSet Reconciler", func() {
 	})
 
 	It("Should reconcile a Machine", func() {
-		instance := &machinev1.Machine{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "foo",
-				Namespace: "default",
-				Labels: map[string]string{
-					machinev1.MachineClusterIDLabel: "foo",
-				},
-			},
-			Spec: machinev1.MachineSpec{
-				ProviderSpec: machinev1.ProviderSpec{
-					Value: &runtime.RawExtension{
-						Raw: []byte("{}"),
-					},
-				},
-			},
-		}
+		instance := machineBuilder.Build()
 
 		By("Creating the Machine")
 		Expect(k8sClient.Create(ctx, instance)).To(Succeed())
@@ -123,35 +115,17 @@ var _ = Describe("MachineSet Reconciler", func() {
 			instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMachineAPI
 		})).Should(Succeed())
 
-		Eventually(func() (machinev1.MachineStatus, error) {
-			machine := &machinev1.Machine{}
-			err := k8sClient.Get(ctx, objectKey(instance), machine)
-			if err != nil {
-				return machinev1.MachineStatus{}, err
-			}
-			return machine.Status, nil
-		}, timeout).ShouldNot(Equal(machinev1.MachineStatus{}))
-		// TODO: Verify that the actuator is called correctly on Create
-		// Expect platform status is not empty. This (check) means we've caLLED the actuator
+		Eventually(k.Object(instance), timeout).Should(HaveField("Status", Not(Equal(machinev1.MachineStatus{}))))
+
+		// // Expect platform status is not empty. This (check) means we've called the
+		// // actuator. Given our test actuator doesn't set any status, we may need to expect to be called.
+
+		// Eventually(k.Object(instance), timeout).Should(HaveField("Status.ProviderStatus", Not(Equal(runtime.RawExtension{}))))
+
 	})
 
 	It("Should set the Paused condition appropriately", func() {
-		instance := &machinev1.Machine{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "foo",
-				Namespace: "default",
-				Labels: map[string]string{
-					machinev1.MachineClusterIDLabel: "foo",
-				},
-			},
-			Spec: machinev1.MachineSpec{
-				ProviderSpec: machinev1.ProviderSpec{
-					Value: &runtime.RawExtension{
-						Raw: []byte("{}"),
-					},
-				},
-			},
-		}
+		instance := machineBuilder.Build()
 
 		By("Creating the Machine")
 		Expect(k8sClient.Create(ctx, instance)).To(Succeed())
@@ -162,61 +136,105 @@ var _ = Describe("MachineSet Reconciler", func() {
 		})).Should(Succeed())
 
 		By("Verifying that the AuthoritativeAPI is set to Cluster API")
-		Eventually(func() (machinev1.MachineAuthority, error) {
-			if err := k8sClient.Get(ctx, objectKey(instance), instance); err != nil {
-				return "", err
-			}
-			return instance.Status.AuthoritativeAPI, nil
-		}, timeout).Should(Equal(machinev1.MachineAuthorityClusterAPI))
+		Eventually(k.Object(instance), timeout).Should(HaveField("Status.AuthoritativeAPI", Equal(machinev1.MachineAuthorityClusterAPI)))
 
 		By("Verifying the paused condition is approproately set to true")
-		Eventually(func() bool {
-			if err := k8sClient.Get(ctx, objectKey(instance), instance); err != nil {
-				return false
-			}
-
-			// The condition is set to true
-			return conditions.IsTrue(instance, PausedCondition)
-		}, timeout).Should(BeTrue())
+		Eventually(k.Object(instance), timeout).Should(HaveField("Status.Conditions", ContainElement(SatisfyAll(
+			HaveField("Type", Equal(PausedCondition)),
+			HaveField("Status", Equal(corev1.ConditionTrue)),
+		))))
 
 		By("Transitioning the AuthoritativeAPI though 'Migrating' to MachineAPI")
 		Eventually(k.UpdateStatus(instance, func() {
 			instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMigrating
 		})).Should(Succeed())
+
+		// The condition should remain true whilst transitioning through 'Migrating'
+		// Run this in a goroutine so we don't block
+
+		// Copy the instance before starting the goroutine, to avoid data races
+		instanceCopy := instance.DeepCopy()
+		go func() {
+			defer GinkgoRecover()
+			framework.RunCheckUntil(
+				ctx,
+				// Check that we consistently have the Paused condition true
+				func(_ context.Context, g framework.GomegaAssertions) bool {
+					By("Checking that the paused condition is consistently true whilst migrating to MachineAPI")
+
+					localInstance := instanceCopy.DeepCopy()
+					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(localInstance), localInstance); err != nil {
+						return g.Expect(err).Should(WithTransform(isRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err)
+					}
+
+					return g.Expect(localInstance.Status.Conditions).Should(ContainElement(SatisfyAll(
+						HaveField("Type", Equal(PausedCondition)),
+						HaveField("Status", Equal(corev1.ConditionTrue)),
+					)))
+
+				},
+				// Condition / until function: until we observe the MachineAuthority being MAPI
+				func(_ context.Context, g framework.GomegaAssertions) bool {
+					By("Checking that the AuthoritativeAPI is not MachineAPI")
+
+					localInstance := instanceCopy.DeepCopy()
+					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(localInstance), localInstance); err != nil {
+						return g.Expect(err).Should(WithTransform(isRetryableAPIError, BeTrue()), "expected temporary error while getting machine: %v", err)
+					}
+
+					return g.Expect(localInstance.Status.AuthoritativeAPI).ToNot(Equal(machinev1.MachineAuthorityMachineAPI))
+				})
+		}()
+
+		By("Updating the AuthoritativeAPI from Migrating to MachineAPI")
 		Eventually(k.UpdateStatus(instance, func() {
 			instance.Status.AuthoritativeAPI = machinev1.MachineAuthorityMachineAPI
 		})).Should(Succeed())
 
+		Eventually(k.Object(instance), timeout).Should(HaveField("Status.AuthoritativeAPI", Equal(machinev1.MachineAuthorityMachineAPI)))
+
 		By("Verifying the paused condition is approproately set to false")
-		Eventually(func() bool {
-			if err := k8sClient.Get(ctx, objectKey(instance), instance); err != nil {
-				return false
-			}
-
-			// The condition is set to true
-			return conditions.IsFalse(instance, PausedCondition)
-		}, timeout).Should(BeTrue())
+		Eventually(k.Object(instance), timeout).Should(HaveField("Status.Conditions", ContainElement(SatisfyAll(
+			HaveField("Type", Equal(PausedCondition)),
+			HaveField("Status", Equal(corev1.ConditionFalse)),
+		))))
+
+		By("Unsetting the AuthoritativeAPI field in the status")
+		Eventually(k.UpdateStatus(instance, func() {
+			instance.Status.AuthoritativeAPI = ""
+		})).Should(Succeed())
+
+			By("Verifying the paused condition is still appropriately set to false")
+		Eventually(k.Object(instance), timeout).Should(HaveField("Status.Conditions", ContainElement(SatisfyAll(
+			HaveField("Type", Equal(PausedCondition)),
+			HaveField("Status", Equal(corev1.ConditionFalse)),
+		))))
 	})
 })
 
-func objectKey(object metav1.Object) client.ObjectKey {
-	return client.ObjectKey{
-		Namespace: object.GetNamespace(),
-		Name:      object.GetName(),
+func cleanResources(namespace string) error {
+	machine := &machinev1.Machine{}
+	if err := k8sClient.DeleteAllOf(ctx, machine, client.InNamespace(namespace)); err != nil {
+		return err
 	}
+
+	return nil
 }
 
-func cleanResources() error {
-	machines := &machinev1.MachineList{}
-	if err := k8sClient.List(ctx, machines); err != nil {
-		return err
+// isRetryableAPIError returns whether an API error is retryable or not.
+// inspired by: k8s.io/kubernetes/test/utils.
+func isRetryableAPIError(err error) bool {
+	// These errors may indicate a transient error that we can retry in tests.
+	if apierrs.IsInternalError(err) || apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) ||
+		apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) ||
+		utilnet.IsHTTP2ConnectionLost(err) {
+		return true
 	}
-	for _, machine := range machines.Items {
-		m := machine
-		if err := k8sClient.Delete(ctx, &m); err != nil {
-			return err
-		}
+
+	// If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
+	if _, shouldRetry := apierrs.SuggestsClientDelay(err); shouldRetry {
+		return true
 	}
 
-	return nil
+	return false
 }