diff --git a/cmd/machine-api-operator/start.go b/cmd/machine-api-operator/start.go
index b225ecc29669be6ec3d7db971be3182b8a7d5f74..de1d1abdad25ea1959ef7508ef1cd5f53dc9a921 100644
--- a/cmd/machine-api-operator/start.go
+++ b/cmd/machine-api-operator/start.go
@@ -72,11 +72,13 @@ func runStartCmd(cmd *cobra.Command, args []string) {
 	}
 	stopCh := make(chan struct{})
 
+	le := util.GetLeaderElectionConfig(cb.config, osconfigv1.LeaderElection{})
+
 	leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
 		Lock:          CreateResourceLock(cb, componentNamespace, componentName),
-		LeaseDuration: util.LeaseDuration,
-		RenewDeadline: util.RenewDeadline,
-		RetryPeriod:   util.RetryPeriod,
+		RenewDeadline: le.RenewDeadline.Duration,
+		RetryPeriod:   le.RetryPeriod.Duration,
+		LeaseDuration: le.LeaseDuration.Duration,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(ctx context.Context) {
 				ctrlCtx := CreateControllerContext(cb, stopCh, componentNamespace)
diff --git a/cmd/machine-healthcheck/main.go b/cmd/machine-healthcheck/main.go
index 1f9fb002a7a856d041825e37b76321227d056eff..91ab7161a824ff3278c004bc737422dd8db92bc7 100644
--- a/cmd/machine-healthcheck/main.go
+++ b/cmd/machine-healthcheck/main.go
@@ -2,16 +2,20 @@ package main
 
 import (
 	"flag"
+	"fmt"
 	"runtime"
 
 	"github.com/openshift/machine-api-operator/pkg/controller/machinehealthcheck"
 	"github.com/openshift/machine-api-operator/pkg/metrics"
 	"github.com/openshift/machine-api-operator/pkg/util"
 
+	osconfigv1 "github.com/openshift/api/config/v1"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
+	"github.com/openshift/library-go/pkg/config/leaderelection"
 
 	"github.com/openshift/machine-api-operator/pkg/controller"
 	sdkVersion "github.com/operator-framework/operator-sdk/version"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -27,6 +31,12 @@ func printVersion() {
 }
 
 func main() {
+	// Used to get the default values for leader election from library-go
+	defaultLeaderElectionValues := leaderelection.LeaderElectionDefaulting(
+		osconfigv1.LeaderElection{},
+		"", "",
+	)
+
 	watchNamespace := flag.String(
 		"namespace",
 		"",
@@ -57,10 +67,11 @@ func main() {
 		"Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.",
 	)
 
+	// Default values are printed for the user to see, but zero is set as the flag default to distinguish user intent from the default value for topology-aware leader election
 	leaderElectLeaseDuration := flag.Duration(
 		"leader-elect-lease-duration",
-		util.LeaseDuration,
-		"The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+		0,
+		fmt.Sprintf("The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. Default: (%s)", defaultLeaderElectionValues.LeaseDuration.Duration),
 	)
 
 	klog.InitFlags(nil)
@@ -73,15 +84,20 @@ func main() {
 		klog.Fatal(err)
 	}
 
+	le := util.GetLeaderElectionConfig(cfg, osconfigv1.LeaderElection{
+		Disable:       !*leaderElect,
+		LeaseDuration: metav1.Duration{Duration: *leaderElectLeaseDuration},
+	})
+
 	opts := manager.Options{
 		MetricsBindAddress:      *metricsAddress,
 		HealthProbeBindAddress:  *healthAddr,
 		LeaderElection:          *leaderElect,
 		LeaderElectionNamespace: *leaderElectResourceNamespace,
 		LeaderElectionID:        "cluster-api-provider-healthcheck-leader",
-		LeaseDuration:           leaderElectLeaseDuration,
-		RetryPeriod:             util.TimeDuration(util.RetryPeriod),
-		RenewDeadline:           util.TimeDuration(util.RenewDeadline),
+		LeaseDuration:           &le.LeaseDuration.Duration,
+		RetryPeriod:             &le.RetryPeriod.Duration,
+		RenewDeadline:           &le.RenewDeadline.Duration,
 	}
 
 	if *watchNamespace != "" {
diff --git a/cmd/machineset/main.go b/cmd/machineset/main.go
index cb4d2f8a9fc53e04eb9c548d693fe778d5c97204..8fb771430b8fc73ebf79bf001b8af395aab31287 100644
--- a/cmd/machineset/main.go
+++ b/cmd/machineset/main.go
@@ -18,15 +18,19 @@ package main
 
 import (
 	"flag"
+	"fmt"
 	"log"
 	"time"
 
+	osconfigv1 "github.com/openshift/api/config/v1"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
+	"github.com/openshift/library-go/pkg/config/leaderelection"
 	"github.com/openshift/machine-api-operator/pkg/controller"
 	"github.com/openshift/machine-api-operator/pkg/controller/machineset"
 	"github.com/openshift/machine-api-operator/pkg/metrics"
 	"github.com/openshift/machine-api-operator/pkg/util"
 	mapiwebhooks "github.com/openshift/machine-api-operator/pkg/webhooks"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -42,6 +46,12 @@ const (
 )
 
 func main() {
+	// Used to get the default values for leader election from library-go
+	defaultLeaderElectionValues := leaderelection.LeaderElectionDefaulting(
+		osconfigv1.LeaderElection{},
+		"", "",
+	)
+
 	flag.Set("logtostderr", "true")
 	klog.InitFlags(nil)
 	watchNamespace := flag.String("namespace", "",
@@ -75,10 +85,11 @@ func main() {
 		"Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.",
 	)
 
+	// Default values are printed for the user to see, but zero is set as the flag default to distinguish user intent from the default value for topology-aware leader election
 	leaderElectLeaseDuration := flag.Duration(
 		"leader-elect-lease-duration",
-		util.LeaseDuration,
-		"The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+		0,
+		fmt.Sprintf("The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. Default: (%s)", defaultLeaderElectionValues.LeaseDuration.Duration),
 	)
 
 	flag.Parse()
@@ -93,6 +104,11 @@ func main() {
 		log.Fatal(err)
 	}
 
+	le := util.GetLeaderElectionConfig(cfg, osconfigv1.LeaderElection{
+		Disable:       !*leaderElect,
+		LeaseDuration: metav1.Duration{Duration: *leaderElectLeaseDuration},
+	})
+
 	// Create a new Cmd to provide shared dependencies and start components
 	syncPeriod := 10 * time.Minute
 	opts := manager.Options{
@@ -103,9 +119,9 @@ func main() {
 		LeaderElection:          *leaderElect,
 		LeaderElectionNamespace: *leaderElectResourceNamespace,
 		LeaderElectionID:        "cluster-api-provider-machineset-leader",
-		LeaseDuration:           leaderElectLeaseDuration,
-		RetryPeriod:             util.TimeDuration(util.RetryPeriod),
-		RenewDeadline:           util.TimeDuration(util.RenewDeadline),
+		LeaseDuration:           &le.LeaseDuration.Duration,
+		RetryPeriod:             &le.RetryPeriod.Duration,
+		RenewDeadline:           &le.RenewDeadline.Duration,
 	}
 
 	mgr, err := manager.New(cfg, opts)
diff --git a/cmd/nodelink-controller/main.go b/cmd/nodelink-controller/main.go
index 9d044423986886bcf3675964ab061d8cc571010c..523834a424d245784d383aa80cb842760b2ff0ed 100644
--- a/cmd/nodelink-controller/main.go
+++ b/cmd/nodelink-controller/main.go
@@ -2,13 +2,17 @@ package main
 
 import (
 	"flag"
+	"fmt"
 	"runtime"
 
+	osconfigv1 "github.com/openshift/api/config/v1"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
+	"github.com/openshift/library-go/pkg/config/leaderelection"
 	"github.com/openshift/machine-api-operator/pkg/controller"
 	"github.com/openshift/machine-api-operator/pkg/controller/nodelink"
 	"github.com/openshift/machine-api-operator/pkg/util"
 	sdkVersion "github.com/operator-framework/operator-sdk/version"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client/config"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -24,6 +28,12 @@ func printVersion() {
 func main() {
 	printVersion()
 
+	// Used to get the default values for leader election from library-go
+	defaultLeaderElectionValues := leaderelection.LeaderElectionDefaulting(
+		osconfigv1.LeaderElection{},
+		"", "",
+	)
+
 	watchNamespace := flag.String(
 		"namespace",
 		"",
@@ -42,10 +52,11 @@ func main() {
 		"Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.",
 	)
 
+	// Default values are printed for the user to see, but zero is set as the flag default to distinguish user intent from the default value for topology-aware leader election
 	leaderElectLeaseDuration := flag.Duration(
 		"leader-elect-lease-duration",
-		util.LeaseDuration,
-		"The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+		0,
+		fmt.Sprintf("The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. Default: (%s)", defaultLeaderElectionValues.LeaseDuration.Duration),
 	)
 
 	klog.InitFlags(nil)
@@ -58,15 +69,20 @@ func main() {
 		klog.Fatal(err)
 	}
 
+	le := util.GetLeaderElectionConfig(cfg, osconfigv1.LeaderElection{
+		Disable:       !*leaderElect,
+		LeaseDuration: metav1.Duration{Duration: *leaderElectLeaseDuration},
+	})
+
 	opts := manager.Options{
 		// Disable metrics serving
 		MetricsBindAddress:      "0",
 		LeaderElection:          *leaderElect,
 		LeaderElectionNamespace: *leaderElectResourceNamespace,
 		LeaderElectionID:        "cluster-api-provider-nodelink-leader",
-		LeaseDuration:           leaderElectLeaseDuration,
-		RetryPeriod:             util.TimeDuration(util.RetryPeriod),
-		RenewDeadline:           util.TimeDuration(util.RenewDeadline),
+		LeaseDuration:           &le.LeaseDuration.Duration,
+		RetryPeriod:             &le.RetryPeriod.Duration,
+		RenewDeadline:           &le.RenewDeadline.Duration,
 	}
 	if *watchNamespace != "" {
 		opts.Namespace = *watchNamespace
diff --git a/cmd/vsphere/main.go b/cmd/vsphere/main.go
index 19af41dfc39dc3778fb027cb5aa092a79de02065..e8a69a237bbf9ebda827fe76f33ed43c0e6b28fd 100644
--- a/cmd/vsphere/main.go
+++ b/cmd/vsphere/main.go
@@ -8,12 +8,14 @@ import (
 
 	configv1 "github.com/openshift/api/config/v1"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
+	"github.com/openshift/library-go/pkg/config/leaderelection"
 	capimachine "github.com/openshift/machine-api-operator/pkg/controller/machine"
 	machine "github.com/openshift/machine-api-operator/pkg/controller/vsphere"
 	machinesetcontroller "github.com/openshift/machine-api-operator/pkg/controller/vsphere/machineset"
 	"github.com/openshift/machine-api-operator/pkg/metrics"
 	"github.com/openshift/machine-api-operator/pkg/util"
 	"github.com/openshift/machine-api-operator/pkg/version"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/klogr"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -27,6 +29,12 @@ func main() {
 	var printVersion bool
 	flag.BoolVar(&printVersion, "version", false, "print version and exit")
 
+	// Used to get the default values for leader election from library-go
+	defaultLeaderElectionValues := leaderelection.LeaderElectionDefaulting(
+		configv1.LeaderElection{},
+		"", "",
+	)
+
 	klog.InitFlags(nil)
 	watchNamespace := flag.String(
 		"namespace",
@@ -46,10 +54,11 @@ func main() {
 		"Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.",
 	)
 
+	// Default values are printed for the user to see, but zero is set as the flag default to distinguish user intent from the default value for topology-aware leader election
 	leaderElectLeaseDuration := flag.Duration(
 		"leader-elect-lease-duration",
-		util.LeaseDuration,
-		"The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+		0,
+		fmt.Sprintf("The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. Default: (%s)", defaultLeaderElectionValues.LeaseDuration.Duration),
 	)
 
 	metricsAddress := flag.String(
@@ -74,6 +83,11 @@ func main() {
 	cfg := config.GetConfigOrDie()
 	syncPeriod := 10 * time.Minute
 
+	le := util.GetLeaderElectionConfig(cfg, configv1.LeaderElection{
+		Disable:       !*leaderElect,
+		LeaseDuration: metav1.Duration{Duration: *leaderElectLeaseDuration},
+	})
+
 	opts := manager.Options{
 		MetricsBindAddress:      *metricsAddress,
 		HealthProbeBindAddress:  *healthAddr,
@@ -81,9 +95,9 @@ func main() {
 		LeaderElection:          *leaderElect,
 		LeaderElectionNamespace: *leaderElectResourceNamespace,
 		LeaderElectionID:        "cluster-api-provider-vsphere-leader",
-		LeaseDuration:           leaderElectLeaseDuration,
-		RetryPeriod:             util.TimeDuration(util.RetryPeriod),
-		RenewDeadline:           util.TimeDuration(util.RenewDeadline),
+		LeaseDuration:           &le.LeaseDuration.Duration,
+		RetryPeriod:             &le.RetryPeriod.Duration,
+		RenewDeadline:           &le.RenewDeadline.Duration,
 	}
 
 	if *watchNamespace != "" {
diff --git a/go.mod b/go.mod
index 626da5c4e1161b9d633becd3591835211bcfe45f..c450932567d1c7709fe707d425745b7972d5e2c1 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
 	github.com/onsi/gomega v1.17.0
 	github.com/openshift/api v0.0.0-20211215120111-7c47a5f63470
 	github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3
-	github.com/openshift/library-go v0.0.0-20211214183058-58531ccbde67
+	github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492
 	github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b
 	github.com/prometheus/client_golang v1.11.0
 	github.com/spf13/cobra v1.2.1
@@ -98,7 +98,7 @@ require (
 	k8s.io/component-base v0.23.0 // indirect
 	k8s.io/kube-aggregator v0.23.0 // indirect
 	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
-	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
 	sigs.k8s.io/kube-storage-version-migrator v0.0.4 // indirect
 	sigs.k8s.io/kustomize/api v0.10.1 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect
diff --git a/go.sum b/go.sum
index 22925e801c127079bad7c468f0a7a9ab83b90ba3..78f1f4a6e005c90ed4ae93a9fa50bff2e4c1d70d 100644
--- a/go.sum
+++ b/go.sum
@@ -70,6 +70,7 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -537,12 +538,11 @@ github.com/openshift/api v0.0.0-20211209135129-c58d9f695577/go.mod h1:DoslCwtqUp
 github.com/openshift/api v0.0.0-20211215120111-7c47a5f63470 h1:kYVTSbYsfLxSBnK8Z2ZN+qgAdclXAf2mYVDyHDfxTZ0=
 github.com/openshift/api v0.0.0-20211215120111-7c47a5f63470/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4=
 github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 h1:SG1aqwleU6bGD0X4mhkTNupjVnByMYYuW4XbnCPavQU=
 github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3/go.mod h1:cwhyki5lqBmrT0m8Im+9I7PGFaraOzcYPtEz93RcsGY=
-github.com/openshift/library-go v0.0.0-20211214183058-58531ccbde67 h1:wNd5jvgf9kXsyT+z11aBlh5spqKPNwsQKplrJRx4nsc=
-github.com/openshift/library-go v0.0.0-20211214183058-58531ccbde67/go.mod h1:M/Gi/GUUrMdSS07nrYtTiK43J6/VUAyk/+IfN4ZqUY4=
+github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492 h1:oj/rSQqVWVj6YJUydZwLz2frrJreiyI4oa9g/YPgMsM=
+github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492/go.mod h1:4UQ9snU1vg53fyTpHQw3vLPiAxI8ub5xrc+y8KPQQFs=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b h1:Q1q8w51pAZdx6LEkaYdSbUaaEOHXTyTXLhtGgIiKaiA=
 github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b/go.mod h1:iVyukRkam5JZa8AnjYf+/G3rk7JI1+M6GsU0sq0B9NA=
@@ -1271,9 +1271,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PN
 sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ=
 sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
 sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
 sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc=
 sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
 sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0=
diff --git a/pkg/util/durations.go b/pkg/util/durations.go
deleted file mode 100644
index efbb64d2f4278322c85e2d63028f402527f50f7a..0000000000000000000000000000000000000000
--- a/pkg/util/durations.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright 2021 Red Hat.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	"time"
-)
-
-// The default durations for the leader election operations.
-const (
-	// LeaseDuration is the default duration for the leader election lease.
-	LeaseDuration = 137 * time.Second
-	// RenewDeadline is the default duration for the leader renewal.
-	RenewDeadline = 107 * time.Second
-	// RetryPeriod is the default duration for the leader election retrial.
-	RetryPeriod = 26 * time.Second
-)
-
-// TimeDuration returns a pointer to the time.Duration.
-func TimeDuration(i time.Duration) *time.Duration {
-	return &i
-}
diff --git a/pkg/util/leaderelection.go b/pkg/util/leaderelection.go
new file mode 100644
index 0000000000000000000000000000000000000000..3f69feb35832f7c23871b817df1313455ea7c4c9
--- /dev/null
+++ b/pkg/util/leaderelection.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2021 Red Hat.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"context"
+
+	configv1 "github.com/openshift/api/config/v1"
+	"github.com/openshift/library-go/pkg/config/clusterstatus"
+	"github.com/openshift/library-go/pkg/config/leaderelection"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+)
+
+// GetLeaderElectionConfig returns leader election config defaults based on the cluster topology
+func GetLeaderElectionConfig(restcfg *rest.Config, leaderElection configv1.LeaderElection) configv1.LeaderElection {
+
+	userExplicitlySetLeaderElectionValues := leaderElection.LeaseDuration.Duration != 0 ||
+		leaderElection.RenewDeadline.Duration != 0 ||
+		leaderElection.RetryPeriod.Duration != 0
+
+	// Defaults follow conventions
+	// https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#high-availability
+	defaultLeaderElection := leaderelection.LeaderElectionDefaulting(
+		leaderElection,
+		"", "",
+	)
+
+	// If user has not supplied any leader election values and leader election is not disabled
+	// Fetch cluster infra status to determine if we should be using SNO LE config
+	if !userExplicitlySetLeaderElectionValues && !leaderElection.Disable {
+		if infra, err := clusterstatus.GetClusterInfraStatus(context.TODO(), restcfg); err == nil && infra != nil {
+			if infra.ControlPlaneTopology == configv1.SingleReplicaTopologyMode {
+				return leaderelection.LeaderElectionSNOConfig(defaultLeaderElection)
+			}
+		} else {
+			klog.Warningf("unable to get cluster infrastructure status, using HA cluster values for leader election: %v", err)
+		}
+	}
+
+	return defaultLeaderElection
+}
diff --git a/pkg/webhooks/v1beta1_suite_test.go b/pkg/webhooks/v1beta1_suite_test.go
index 0227c9f750a54b3548adfa1c085f515036372aca..815437cd1d8516195843db1325dc49f5ca97c06c 100644
--- a/pkg/webhooks/v1beta1_suite_test.go
+++ b/pkg/webhooks/v1beta1_suite_test.go
@@ -29,10 +29,10 @@ import (
 
 	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	osconfigv1 "github.com/openshift/api/config/v1"
 	machinev1 "github.com/openshift/api/machine/v1beta1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusterstatus/clusterstatus.go b/vendor/github.com/openshift/library-go/pkg/config/clusterstatus/clusterstatus.go
new file mode 100644
index 0000000000000000000000000000000000000000..dbffe623765b4b5077ceeb1970192f7c91db88d7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/config/clusterstatus/clusterstatus.go
@@ -0,0 +1,29 @@
+package clusterstatus
+
+import (
+	"context"
+	"fmt"
+
+	configv1 "github.com/openshift/api/config/v1"
+	openshiftcorev1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"k8s.io/client-go/rest"
+)
+
+const infraResourceName = "cluster"
+
+func GetClusterInfraStatus(ctx context.Context, restClient *rest.Config) (*configv1.InfrastructureStatus, error) {
+	client, err := openshiftcorev1.NewForConfig(restClient)
+	if err != nil {
+		return nil, err
+	}
+	infra, err := client.Infrastructures().Get(ctx, infraResourceName, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	if infra == nil {
+		return nil, fmt.Errorf("getting resource Infrastructure (name: %s) succeeded but object was nil", infraResourceName)
+	}
+	return &infra.Status, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go
new file mode 100644
index 0000000000000000000000000000000000000000..5cec68257b300064faead75713571b75f1892f15
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go
@@ -0,0 +1,154 @@
+package leaderelection
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"time"
+
+	"k8s.io/klog/v2"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/client-go/kubernetes"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	"k8s.io/client-go/tools/record"
+
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// ToConfigMapLeaderElection returns a leader election config that you just need to fill in the Callback for.  Don't forget the callbacks!
+func ToConfigMapLeaderElection(clientConfig *rest.Config, config configv1.LeaderElection, component, identity string) (leaderelection.LeaderElectionConfig, error) {
+	kubeClient, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return leaderelection.LeaderElectionConfig{}, err
+	}
+
+	if len(identity) == 0 {
+		if hostname, err := os.Hostname(); err != nil {
+			// on errors, make sure we're unique
+			identity = string(uuid.NewUUID())
+		} else {
+			// add a uniquifier so that two processes on the same host don't accidentally both become active
+			identity = hostname + "_" + string(uuid.NewUUID())
+		}
+	}
+	if len(config.Namespace) == 0 {
+		return leaderelection.LeaderElectionConfig{}, fmt.Errorf("namespace may not be empty")
+	}
+	if len(config.Name) == 0 {
+		return leaderelection.LeaderElectionConfig{}, fmt.Errorf("name may not be empty")
+	}
+
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartLogging(klog.Infof)
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
+	eventRecorder := eventBroadcaster.NewRecorder(clientgoscheme.Scheme, corev1.EventSource{Component: component})
+	rl, err := resourcelock.New(
+		resourcelock.ConfigMapsResourceLock,
+		config.Namespace,
+		config.Name,
+		kubeClient.CoreV1(),
+		kubeClient.CoordinationV1(),
+		resourcelock.ResourceLockConfig{
+			Identity:      identity,
+			EventRecorder: eventRecorder,
+		})
+	if err != nil {
+		return leaderelection.LeaderElectionConfig{}, err
+	}
+
+	return leaderelection.LeaderElectionConfig{
+		Lock:            rl,
+		ReleaseOnCancel: true,
+		LeaseDuration:   config.LeaseDuration.Duration,
+		RenewDeadline:   config.RenewDeadline.Duration,
+		RetryPeriod:     config.RetryPeriod.Duration,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStoppedLeading: func() {
+				defer os.Exit(0)
+				klog.Warningf("leader election lost")
+			},
+		},
+	}, nil
+}
+
+// LeaderElectionDefaulting applies what we think are reasonable defaults.  It does not mutate the original.
+// We do defaulting outside the API so that we can change over time and know whether the user intended to override our values
+// as opposed to simply getting the defaulted serialization at some point.
+func LeaderElectionDefaulting(config configv1.LeaderElection, defaultNamespace, defaultName string) configv1.LeaderElection {
+	ret := *(&config).DeepCopy()
+
+	// We want to be able to tolerate 60s of kube-apiserver disruption without causing pod restarts.
+	// We want the graceful lease re-acquisition fairly quick to avoid waits on new deployments and other rollouts.
+	// We want a single set of guidance for nearly every lease in openshift.  If you're special, we'll let you know.
+	// 1. clock skew tolerance is leaseDuration-renewDeadline == 30s
+	// 2. kube-apiserver downtime tolerance is == 78s
+	//      lastRetry=floor(renewDeadline/retryPeriod)*retryPeriod == 104
+	//      downtimeTolerance = lastRetry-retryPeriod == 78s
+	// 3. worst non-graceful lease acquisition is leaseDuration+retryPeriod == 163s
+	// 4. worst graceful lease acquisition is retryPeriod == 26s
+	if ret.LeaseDuration.Duration == 0 {
+		ret.LeaseDuration.Duration = 137 * time.Second
+	}
+
+	if ret.RenewDeadline.Duration == 0 {
+		// this gives 107/26=4 retries and allows for 137-107=30 seconds of clock skew
+		// if the kube-apiserver is unavailable for 60s starting just before t=26 (the first renew),
+		// then we will retry on 26s intervals until t=104 (kube-apiserver came back up at 86), and there will
+		// be 33 seconds of extra time before the lease is lost.
+		ret.RenewDeadline.Duration = 107 * time.Second
+	}
+	if ret.RetryPeriod.Duration == 0 {
+		ret.RetryPeriod.Duration = 26 * time.Second
+	}
+	if len(ret.Namespace) == 0 {
+		if len(defaultNamespace) > 0 {
+			ret.Namespace = defaultNamespace
+		} else {
+			// Fall back to the namespace associated with the service account token, if available
+			if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
+				if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
+					ret.Namespace = ns
+				}
+			}
+		}
+	}
+	if len(ret.Name) == 0 {
+		ret.Name = defaultName
+	}
+	return ret
+}
+
+// LeaderElectionSNOConfig uses the formula derived in LeaderElectionDefaulting with increased
+// retry period and lease duration for SNO clusters that have limited resources.
+// This method does not respect the passed in LeaderElection config and the returned object will have values
+// that are overridden with SNO environments in mind.
+// This method should only be called when running in an SNO Cluster.
+func LeaderElectionSNOConfig(config configv1.LeaderElection) configv1.LeaderElection {
+
+	// We want to make sure we respect a 30s clock skew as well as a 4 retry attempt with out making
+	// leader election ineffectual while still having some small performance gain by limiting calls against
+	// the api server.
+
+	// 1. clock skew tolerance is leaseDuration-renewDeadline == 30s
+	// 2. kube-apiserver downtime tolerance is == 180s
+	//      lastRetry=floor(renewDeadline/retryPeriod)*retryPeriod == 240
+	//      downtimeTolerance = lastRetry-retryPeriod == 180s
+	// 3. worst non-graceful lease acquisition is leaseDuration+retryPeriod == 330s
+	// 4. worst graceful lease acquisition is retryPeriod == 60s
+
+	ret := *(&config).DeepCopy()
+	// 270-240 = 30s of clock skew tolerance
+	ret.LeaseDuration.Duration = 270 * time.Second
+	// 240/60 = 4 retries attempts before leader is lost.
+	ret.RenewDeadline.Duration = 240 * time.Second
+	// With 60s retry config we aim to maintain 30s of clock skew as well as 4 retry attempts.
+	ret.RetryPeriod.Duration = 60 * time.Second
+	return ret
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
index 2f952f5c598a72d163472c26fbc7b1aa81e99b0c..1ffee4f80db22ffbed818c7b7cd7814aee904616 100644
--- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
@@ -337,16 +337,10 @@ func ApplyConfigMapImproved(ctx context.Context, client coreclientv1.ConfigMapsG
 
 // ApplySecret merges objectmeta, requires data
 func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, requiredInput *corev1.Secret, cache ResourceCache) (*corev1.Secret, bool, error) {
+	// copy the stringData to data.  Error on a data content conflict inside required.  This is usually a bug.
+
 	existing, err := client.Secrets(requiredInput.Namespace).Get(ctx, requiredInput.Name, metav1.GetOptions{})
-	if apierrors.IsNotFound(err) {
-		requiredCopy := requiredInput.DeepCopy()
-		actual, err := client.Secrets(requiredCopy.Namespace).
-			Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Secret), metav1.CreateOptions{})
-		reportCreateEvent(recorder, requiredCopy, err)
-		cache.UpdateCachedResourceMetadata(requiredInput, actual)
-		return actual, true, err
-	}
-	if err != nil {
+	if err != nil && !apierrors.IsNotFound(err) {
 		return nil, false, err
 	}
 
@@ -354,7 +348,6 @@ func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter,
 		return existing, false, nil
 	}
 
-	// copy the stringData to data.  Error on a data content conflict inside required.  This is usually a bug.
 	required := requiredInput.DeepCopy()
 	if required.Data == nil {
 		required.Data = map[string][]byte{}
@@ -369,6 +362,18 @@ func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter,
 	}
 	required.StringData = nil
 
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.Secrets(requiredCopy.Namespace).
+			Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Secret), metav1.CreateOptions{})
+		reportCreateEvent(recorder, requiredCopy, err)
+		cache.UpdateCachedResourceMetadata(requiredInput, actual)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
 	existingCopy := existing.DeepCopy()
 
 	resourcemerge.EnsureObjectMeta(resourcemerge.BoolPtr(false), &existingCopy.ObjectMeta, required.ObjectMeta)
@@ -397,7 +402,7 @@ func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter,
 	}
 
 	if equality.Semantic.DeepEqual(existingCopy, existing) {
-		cache.UpdateCachedResourceMetadata(required, existingCopy)
+		cache.UpdateCachedResourceMetadata(requiredInput, existingCopy)
 		return existing, false, nil
 	}
 
@@ -431,7 +436,7 @@ func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter,
 	existingCopy.ResourceVersion = ""
 	actual, err = client.Secrets(required.Namespace).Create(ctx, existingCopy, metav1.CreateOptions{})
 	reportCreateEvent(recorder, existingCopy, err)
-	cache.UpdateCachedResourceMetadata(required, actual)
+	cache.UpdateCachedResourceMetadata(requiredInput, actual)
 	return actual, true, err
 }
 
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go
index 2ff94536e1cb16b925961aab59a65c5eaeec7025..daa1a5e15400d43700d36b9682a44ece5c36c578 100644
--- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go
@@ -4,6 +4,7 @@ import (
 	"crypto/md5"
 	"fmt"
 	"io"
+	"reflect"
 
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -42,10 +43,16 @@ func NewResourceCache() *resourceCache {
 var noCache *resourceCache
 
 func getResourceMetadata(obj runtime.Object) (schema.GroupKind, string, string, string, error) {
+	if obj == nil {
+		return schema.GroupKind{}, "", "", "", fmt.Errorf("nil object has no metadata")
+	}
 	metadata, err := meta.Accessor(obj)
 	if err != nil {
 		return schema.GroupKind{}, "", "", "", err
 	}
+	if metadata == nil || reflect.ValueOf(metadata).IsNil() {
+		return schema.GroupKind{}, "", "", "", fmt.Errorf("object has no metadata")
+	}
 	resourceHash := hashOfResourceStruct(obj)
 
 	// retrieve kind, sometimes this can be done via the accesor, sometimes not (depends on the type)
@@ -66,10 +73,16 @@ func getResourceMetadata(obj runtime.Object) (schema.GroupKind, string, string,
 }
 
 func getResourceVersion(obj runtime.Object) (string, error) {
+	if obj == nil {
+		return "", fmt.Errorf("nil object has no resourceVersion")
+	}
 	metadata, err := meta.Accessor(obj)
 	if err != nil {
 		return "", err
 	}
+	if metadata == nil || reflect.ValueOf(metadata).IsNil() {
+		return "", fmt.Errorf("object has no metadata")
+	}
 	rv := metadata.GetResourceVersion()
 	if len(rv) == 0 {
 		return "", fmt.Errorf("missing resourceVersion")
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a70102a0f5513a39459adf6f6d9a4e4c94199dfe..73666c7755d367e53b58f002eb4a6a42d859702c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -269,9 +269,11 @@ github.com/openshift/client-go/machine/informers/externalversions/internalinterf
 github.com/openshift/client-go/machine/informers/externalversions/machine
 github.com/openshift/client-go/machine/informers/externalversions/machine/v1beta1
 github.com/openshift/client-go/machine/listers/machine/v1beta1
-# github.com/openshift/library-go v0.0.0-20211214183058-58531ccbde67
+# github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492
 ## explicit; go 1.17
 github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers
+github.com/openshift/library-go/pkg/config/clusterstatus
+github.com/openshift/library-go/pkg/config/leaderelection
 github.com/openshift/library-go/pkg/operator/events
 github.com/openshift/library-go/pkg/operator/resource/resourceapply
 github.com/openshift/library-go/pkg/operator/resource/resourcehash
@@ -998,8 +1000,8 @@ sigs.k8s.io/controller-runtime/pkg/webhook
 sigs.k8s.io/controller-runtime/pkg/webhook/admission
 sigs.k8s.io/controller-runtime/pkg/webhook/conversion
 sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
-# sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2
-## explicit; go 1.17
+# sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6
+## explicit; go 1.16
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
 # sigs.k8s.io/kube-storage-version-migrator v0.0.4
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
index 3a8b64547da3710b53f99a8a803e8ded4f4163dd..a047d981bfea2e487578d559c44caf46fdb577a5 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -234,7 +234,6 @@ type decodeState struct {
 
 	savedStrictErrors []error
 	seenStrictErrors  map[string]struct{}
-	strictFieldStack  []string
 
 	caseSensitive bool
 
@@ -262,8 +261,6 @@ func (d *decodeState) init(data []byte) *decodeState {
 		// Reuse the allocated space for the FieldStack slice.
 		d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
 	}
-	// Reuse the allocated space for the strict FieldStack slice.
-	d.strictFieldStack = d.strictFieldStack[:0]
 	return d
 }
 
@@ -558,12 +555,6 @@ func (d *decodeState) array(v reflect.Value) error {
 		break
 	}
 
-	origStrictFieldStackLen := len(d.strictFieldStack)
-	defer func() {
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
-	}()
-
 	i := 0
 	for {
 		// Look ahead for ] - can only happen on first iteration.
@@ -589,7 +580,6 @@ func (d *decodeState) array(v reflect.Value) error {
 			}
 		}
 
-		d.appendStrictFieldStackIndex(i)
 		if i < v.Len() {
 			// Decode into element.
 			if err := d.value(v.Index(i)); err != nil {
@@ -601,8 +591,6 @@ func (d *decodeState) array(v reflect.Value) error {
 				return err
 			}
 		}
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
 		i++
 
 		// Next token must be , or ].
@@ -695,7 +683,7 @@ func (d *decodeState) object(v reflect.Value) error {
 					seenKeys = map[string]struct{}{}
 				}
 				if _, seen := seenKeys[fieldName]; seen {
-					d.saveStrictError(d.newFieldError("duplicate field", fieldName))
+					d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
 				} else {
 					seenKeys[fieldName] = struct{}{}
 				}
@@ -711,7 +699,7 @@ func (d *decodeState) object(v reflect.Value) error {
 				var seenKeys uint64
 				checkDuplicateField = func(fieldNameIndex int, fieldName string) {
 					if seenKeys&(1<<fieldNameIndex) != 0 {
-						d.saveStrictError(d.newFieldError("duplicate field", fieldName))
+						d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
 					} else {
 						seenKeys = seenKeys | (1 << fieldNameIndex)
 					}
@@ -724,7 +712,7 @@ func (d *decodeState) object(v reflect.Value) error {
 						seenIndexes = make([]bool, len(fields.list))
 					}
 					if seenIndexes[fieldNameIndex] {
-						d.saveStrictError(d.newFieldError("duplicate field", fieldName))
+						d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
 					} else {
 						seenIndexes[fieldNameIndex] = true
 					}
@@ -744,7 +732,6 @@ func (d *decodeState) object(v reflect.Value) error {
 	if d.errorContext != nil {
 		origErrorContext = *d.errorContext
 	}
-	origStrictFieldStackLen := len(d.strictFieldStack)
 
 	for {
 		// Read opening " of string key or closing }.
@@ -781,7 +768,6 @@ func (d *decodeState) object(v reflect.Value) error {
 			if checkDuplicateField != nil {
 				checkDuplicateField(0, string(key))
 			}
-			d.appendStrictFieldStackKey(string(key))
 		} else {
 			var f *field
 			if i, ok := fields.nameIndex[string(key)]; ok {
@@ -834,9 +820,8 @@ func (d *decodeState) object(v reflect.Value) error {
 				}
 				d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
 				d.errorContext.Struct = t
-				d.appendStrictFieldStackKey(f.name)
 			} else if d.disallowUnknownFields {
-				d.saveStrictError(d.newFieldError("unknown field", string(key)))
+				d.saveStrictError(fmt.Errorf("unknown field %q", key))
 			}
 		}
 
@@ -920,8 +905,6 @@ func (d *decodeState) object(v reflect.Value) error {
 			d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
 			d.errorContext.Struct = origErrorContext.Struct
 		}
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
 		if d.opcode == scanEndObject {
 			break
 		}
@@ -1158,12 +1141,6 @@ func (d *decodeState) valueInterface() (val interface{}) {
 
 // arrayInterface is like array but returns []interface{}.
 func (d *decodeState) arrayInterface() []interface{} {
-	origStrictFieldStackLen := len(d.strictFieldStack)
-	defer func() {
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
-	}()
-
 	var v = make([]interface{}, 0)
 	for {
 		// Look ahead for ] - can only happen on first iteration.
@@ -1172,10 +1149,7 @@ func (d *decodeState) arrayInterface() []interface{} {
 			break
 		}
 
-		d.appendStrictFieldStackIndex(len(v))
 		v = append(v, d.valueInterface())
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
 
 		// Next token must be , or ].
 		if d.opcode == scanSkipSpace {
@@ -1193,12 +1167,6 @@ func (d *decodeState) arrayInterface() []interface{} {
 
 // objectInterface is like object but returns map[string]interface{}.
 func (d *decodeState) objectInterface() map[string]interface{} {
-	origStrictFieldStackLen := len(d.strictFieldStack)
-	defer func() {
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
-	}()
-
 	m := make(map[string]interface{})
 	for {
 		// Read opening " of string key or closing }.
@@ -1231,15 +1199,12 @@ func (d *decodeState) objectInterface() map[string]interface{} {
 
 		if d.disallowDuplicateFields {
 			if _, exists := m[key]; exists {
-				d.saveStrictError(d.newFieldError("duplicate field", key))
+				d.saveStrictError(fmt.Errorf("duplicate field %q", key))
 			}
 		}
 
 		// Read value.
-		d.appendStrictFieldStackKey(key)
 		m[key] = d.valueInterface()
-		// Reset to original length and reuse the allocated space for the strict FieldStack slice.
-		d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen]
 
 		// Next token must be , or }.
 		if d.opcode == scanSkipSpace {
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go
index cb9ab062717a832e8fd6ed4422cc83991a81ed6d..4b7acd6ef298d4e102884b7cd5d77b36dd4dd530 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go
@@ -18,8 +18,6 @@ package json
 
 import (
 	gojson "encoding/json"
-	"fmt"
-	"strconv"
 	"strings"
 )
 
@@ -71,14 +69,6 @@ func (d *Decoder) DisallowDuplicateFields() {
 	d.d.disallowDuplicateFields = true
 }
 
-func (d *decodeState) newFieldError(msg, field string) error {
-	if len(d.strictFieldStack) > 0 {
-		return fmt.Errorf("%s %q", msg, strings.Join(d.strictFieldStack, "")+"."+field)
-	} else {
-		return fmt.Errorf("%s %q", msg, field)
-	}
-}
-
 // saveStrictError saves a strict decoding error,
 // for reporting at the end of the unmarshal if no other errors occurred.
 func (d *decodeState) saveStrictError(err error) {
@@ -100,24 +90,6 @@ func (d *decodeState) saveStrictError(err error) {
 	d.savedStrictErrors = append(d.savedStrictErrors, err)
 }
 
-func (d *decodeState) appendStrictFieldStackKey(key string) {
-	if !d.disallowDuplicateFields && !d.disallowUnknownFields {
-		return
-	}
-	if len(d.strictFieldStack) > 0 {
-		d.strictFieldStack = append(d.strictFieldStack, ".", key)
-	} else {
-		d.strictFieldStack = append(d.strictFieldStack, key)
-	}
-}
-
-func (d *decodeState) appendStrictFieldStackIndex(i int) {
-	if !d.disallowDuplicateFields && !d.disallowUnknownFields {
-		return
-	}
-	d.strictFieldStack = append(d.strictFieldStack, "[", strconv.Itoa(i), "]")
-}
-
 // UnmarshalStrictError holds errors resulting from use of strict disallow___ decoder directives.
 // If this is returned from Unmarshal(), it means the decoding was successful in all other respects.
 type UnmarshalStrictError struct {